[server] Ignore uploadUrl limit while copying files
Commit 0dcc3c974c (parent 74901999ad)
@@ -110,7 +110,7 @@ func (h *FileHandler) GetUploadURLs(c *gin.Context) {
 	userID := auth.GetUserID(c.Request.Header)
 	count, _ := strconv.Atoi(c.Query("count"))
-	urls, err := h.Controller.GetUploadURLs(c, userID, count, enteApp)
+	urls, err := h.Controller.GetUploadURLs(c, userID, count, enteApp, false)
 	if err != nil {
 		handler.Error(c, stacktrace.Propagate(err, ""))
 		return
@@ -57,7 +57,7 @@ func (h *PublicCollectionHandler) GetUploadUrls(c *gin.Context) {
 	}
 	userID := collection.Owner.ID
 	count, _ := strconv.Atoi(c.Query("count"))
-	urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp)
+	urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp, false)
 	if err != nil {
 		handler.Error(c, stacktrace.Propagate(err, ""))
 		return
@@ -258,7 +258,7 @@ func (c *FileController) Update(ctx context.Context, userID int64, file ente.Fil
 	}
 
 // GetUploadURLs returns a bunch of presigned URLs for uploading files
-func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App) ([]ente.UploadURL, error) {
+func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App, ignoreLimit bool) ([]ente.UploadURL, error) {
 	err := c.UsageCtrl.CanUploadFile(ctx, userID, nil, app)
 	if err != nil {
 		return []ente.UploadURL{}, stacktrace.Propagate(err, "")
@@ -268,7 +268,7 @@ func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count
 	bucket := c.S3Config.GetHotBucket()
 	urls := make([]ente.UploadURL, 0)
 	objectKeys := make([]string, 0)
-	if count > MaxUploadURLsLimit {
+	if count > MaxUploadURLsLimit && !ignoreLimit {
 		count = MaxUploadURLsLimit
 	}
 	for i := 0; i < count; i++ {
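For context, this hunk holds the behaviour the new flag toggles: the requested URL count is capped at MaxUploadURLsLimit unless the caller opts out. Below is a minimal, standalone sketch of that condition; the helper name and the constant's value are hypothetical, and only count, ignoreLimit, and MaxUploadURLsLimit come from the diff.

```go
package main

import "fmt"

// Hypothetical value; the real MaxUploadURLsLimit constant is defined in the
// file controller and is not shown in this diff.
const MaxUploadURLsLimit = 250

// clampUploadURLCount mirrors the changed condition above: cap the requested
// count unless the caller explicitly asked to ignore the limit.
func clampUploadURLCount(count int, ignoreLimit bool) int {
	if count > MaxUploadURLsLimit && !ignoreLimit {
		return MaxUploadURLsLimit
	}
	return count
}

func main() {
	fmt.Println(clampUploadURLCount(1000, false)) // 250: regular upload requests stay capped
	fmt.Println(clampUploadURLCount(1000, true))  // 1000: the copy path gets one URL per object
}
```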
@@ -92,7 +92,7 @@ func (fc *FileCopyController) CopyFiles(c *gin.Context, req ente.CopyFileSyncReq
 
 	// request the uploadUrls using existing method. This is to ensure that orphan objects are automatically cleaned up
 	// todo:(neeraj) optimize this method by removing the need for getting a signed url for each object
-	uploadUrls, err := fc.FileController.GetUploadURLs(c, userID, len(s3ObjectsToCopy), app)
+	uploadUrls, err := fc.FileController.GetUploadURLs(c, userID, len(s3ObjectsToCopy), app, true)
 	if err != nil {
 		return nil, err
 	}
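The copy path is the only caller that passes true: it needs one presigned URL per S3 object being copied, and that number can exceed the cap applied to regular client upload requests. A rough sketch of that one-to-one expectation follows; every type and name in it is an illustrative stand-in rather than the actual ente server code.

```go
package main

import "fmt"

// UploadURL is an illustrative stand-in for ente.UploadURL.
type UploadURL struct {
	ObjectKey string
	URL       string
}

// getUploadURLs stands in for FileController.GetUploadURLs: it returns one
// presigned URL per requested object, capped unless ignoreLimit is set.
func getUploadURLs(count int, ignoreLimit bool) []UploadURL {
	const maxUploadURLsLimit = 250 // hypothetical cap
	if count > maxUploadURLsLimit && !ignoreLimit {
		count = maxUploadURLsLimit
	}
	urls := make([]UploadURL, count)
	for i := range urls {
		urls[i] = UploadURL{
			ObjectKey: fmt.Sprintf("object-%d", i),
			URL:       fmt.Sprintf("https://bucket.example.org/upload/%d", i),
		}
	}
	return urls
}

func main() {
	s3ObjectsToCopy := make([]string, 400) // more objects than the cap allows

	// With ignoreLimit=false the copy would silently receive fewer URLs than
	// objects; with true (as in the hunk above) the counts match.
	for _, ignoreLimit := range []bool{false, true} {
		urls := getUploadURLs(len(s3ObjectsToCopy), ignoreLimit)
		fmt.Printf("ignoreLimit=%v: objects=%d urls=%d\n", ignoreLimit, len(s3ObjectsToCopy), len(urls))
	}
}
```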