// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package actions

// GitHub Actions Artifacts API Simple Description
//
// 1. Upload artifact
// 1.1. Post upload url
// Post: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?api-version=6.0-preview
// Request:
// {
//	"Type": "actions_storage",
//	"Name": "artifact"
// }
// Response:
// {
//	"fileContainerResourceUrl":"/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload"
// }
// it acquires an upload url for the artifact upload
// 1.2. Upload artifact
// PUT: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/upload?itemPath=artifact%2Ffilename
// it uploads the chunks with these headers:
//	x-tfs-filelength: 1024            // total file length
//	content-length: 1024              // chunk length
//	x-actions-results-md5: md5sum     // md5sum of chunk
//	content-range: bytes 0-1023/1024  // chunk range
// we save all chunks to one storage directory after an md5sum check
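//
// for illustration only, a single-chunk upload could look like this
// (hypothetical ids; the md5 value is a placeholder for the checksum
// of the chunk body):
//
//	PUT /api/actions_pipeline/_apis/pipelines/workflows/1/artifacts/{artifact_id}/upload?itemPath=artifact%2Ffilename
//	x-tfs-filelength: 1024
//	content-length: 1024
//	x-actions-results-md5: <md5 of the 1024-byte body>
//	content-range: bytes 0-1023/1024
//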
// 1.3. Confirm upload
// PATCH: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?artifactName=artifact&api-version=6.0-preview
// it confirms the upload and merges all chunks into one file, then saves this file to storage
//
// 2. Download artifact
// 2.1 list artifacts
// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts?api-version=6.0-preview
// Response:
// {
//	"count": 1,
//	"value": [
//		{
//			"name": "artifact",
//			"fileContainerResourceUrl": "/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/path"
//		}
//	]
// }
// 2.2 download artifact
// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/path?api-version=6.0-preview
// Response:
// {
//	"value": [
//		{
//			"contentLocation": "/api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/download",
//			"path": "artifact/filename",
//			"itemType": "file"
//		}
//	]
// }
// 2.3 download artifact file
// GET: /api/actions_pipeline/_apis/pipelines/workflows/{run_id}/artifacts/{artifact_id}/download?itemPath=artifact%2Ffilename
// Response:
// the raw file content is served
//
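// as a quick reference, the whole client session is (hypothetical run id 1,
// authenticated with the runner's ACTIONS_RUNTIME_TOKEN):
//
//	POST  .../workflows/1/artifacts            -> returns the upload url
//	PUT   .../artifacts/{artifact_id}/upload   -> uploads the chunks
//	PATCH .../artifacts?artifactName=artifact  -> merges the chunks
//	GET   .../workflows/1/artifacts            -> lists artifact names
//	GET   .../artifacts/{artifact_id}/path     -> lists the files
//	GET   .../artifacts/{artifact_id}/download -> serves one file
//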

import (
	"crypto/md5"
	"errors"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models/actions"
	"code.gitea.io/gitea/models/db"
	quota_model "code.gitea.io/gitea/models/quota"
	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/util"
	"code.gitea.io/gitea/modules/web"
	web_types "code.gitea.io/gitea/modules/web/types"
	actions_service "code.gitea.io/gitea/services/actions"
	"code.gitea.io/gitea/services/context"
)

const artifactRouteBase = "/_apis/pipelines/workflows/{run_id}/artifacts"

type artifactContextKeyType struct{}

var artifactContextKey = artifactContextKeyType{}

type ArtifactContext struct {
	*context.Base

	ActionTask *actions.ActionTask
}

func init() {
	web.RegisterResponseStatusProvider[*ArtifactContext](func(req *http.Request) web_types.ResponseStatusProvider {
		return req.Context().Value(artifactContextKey).(*ArtifactContext)
	})
}
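
// ArtifactsRoutes builds the route tree for the artifact API; every request
// passes through ArtifactContexter first.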
func ArtifactsRoutes(prefix string) *web.Route {
	m := web.NewRoute()
	m.Use(ArtifactContexter())

	r := artifactRoutes{
		prefix: prefix,
		fs:     storage.ActionsArtifacts,
	}

	m.Group(artifactRouteBase, func() {
		// retrieve, list and confirm artifacts
		m.Combo("").Get(r.listArtifacts).Post(r.getUploadArtifactURL).Patch(r.confirmUploadArtifact)
		// handle artifact chunk upload
		m.Put("/{artifact_hash}/upload", r.uploadArtifact)
		// handle artifacts download
		m.Get("/{artifact_hash}/download_url", r.getDownloadArtifactURL)
		m.Get("/{artifact_id}/download", r.downloadArtifact)
	})

	return m
}
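
// ArtifactContexter authenticates the runner: new act_runner sends a JWT,
// old act_runner sends the task token, and in both cases the resolved
// running task is attached to the request context.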
func ArtifactContexter() func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
			base, baseCleanUp := context.NewBaseContext(resp, req)
			defer baseCleanUp()

			ctx := &ArtifactContext{Base: base}
			ctx.AppendContextValue(artifactContextKey, ctx)

			// an action task calls the server api with a Bearer ACTIONS_RUNTIME_TOKEN,
			// so we have to verify the ACTIONS_RUNTIME_TOKEN
			authHeader := req.Header.Get("Authorization")
			if len(authHeader) == 0 || !strings.HasPrefix(authHeader, "Bearer ") {
				ctx.Error(http.StatusUnauthorized, "Bad authorization header")
				return
			}

			// new act_runner uses jwt to authenticate
			tID, err := actions_service.ParseAuthorizationToken(req)

			var task *actions.ActionTask
			if err == nil {
				task, err = actions.GetTaskByID(req.Context(), tID)
				if err != nil {
					log.Error("Error runner api getting task by ID: %v", err)
					ctx.Error(http.StatusInternalServerError, "Error runner api getting task by ID")
					return
				}
				if task.Status != actions.StatusRunning {
					log.Error("Error runner api getting task: task is not running")
					ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
					return
				}
			} else {
				// old act_runner uses GITEA_TOKEN to authenticate
				authToken := strings.TrimPrefix(authHeader, "Bearer ")

				task, err = actions.GetRunningTaskByToken(req.Context(), authToken)
				if err != nil {
					log.Error("Error runner api getting task: %v", err)
					ctx.Error(http.StatusInternalServerError, "Error runner api getting task")
					return
				}
			}

			if err := task.LoadJob(req.Context()); err != nil {
				log.Error("Error runner api getting job: %v", err)
				ctx.Error(http.StatusInternalServerError, "Error runner api getting job")
				return
			}

			ctx.ActionTask = task
			next.ServeHTTP(ctx.Resp, ctx.Req)
		})
	}
}

type artifactRoutes struct {
	prefix string
	fs     storage.ObjectStorage
}
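
// buildArtifactURL composes an absolute artifact URL. For example
// (hypothetical values), AppURL "https://gitea.example.com/", prefix
// "/api/actions_pipeline", runID 100, artifactHash "a1b2c3" and suffix
// "upload" yield
//
//	https://gitea.example.com/api/actions_pipeline/_apis/pipelines/workflows/100/artifacts/a1b2c3/upload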
func (ar artifactRoutes) buildArtifactURL(runID int64, artifactHash, suffix string) string {
	uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(ar.prefix, "/") +
		strings.ReplaceAll(artifactRouteBase, "{run_id}", strconv.FormatInt(runID, 10)) +
		"/" + artifactHash + "/" + suffix
	return uploadURL
}

type getUploadArtifactRequest struct {
	Type          string
	Name          string
	RetentionDays int64
}

type getUploadArtifactResponse struct {
	FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}

// getUploadArtifactURL generates a URL for uploading an artifact
func (ar artifactRoutes) getUploadArtifactURL(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}

	var req getUploadArtifactRequest
	if err := json.NewDecoder(ctx.Req.Body).Decode(&req); err != nil {
		log.Error("Error decode request body: %v", err)
		ctx.Error(http.StatusInternalServerError, "Error decode request body")
		return
	}

	// set retention days
	retentionQuery := ""
	if req.RetentionDays > 0 {
		retentionQuery = fmt.Sprintf("?retentionDays=%d", req.RetentionDays)
	}

	// use md5(artifact_name) to create upload url
	artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(req.Name)))
	resp := getUploadArtifactResponse{
		FileContainerResourceURL: ar.buildArtifactURL(runID, artifactHash, "upload"+retentionQuery),
	}
	log.Debug("[artifact] get upload url: %s", resp.FileContainerResourceURL)
	ctx.JSON(http.StatusOK, resp)
}
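
// uploadArtifact stores a single uploaded chunk and keeps the artifact's
// recorded sizes in sync with what has actually been received.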
func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
	task, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}
	artifactName, artifactPath, ok := parseArtifactItemPath(ctx)
	if !ok {
		return
	}

	// check the owner's quota
	ok, err := quota_model.EvaluateForUser(ctx, ctx.ActionTask.OwnerID, quota_model.LimitSubjectSizeAssetsArtifacts)
	if err != nil {
		log.Error("quota_model.EvaluateForUser: %v", err)
		ctx.Error(http.StatusInternalServerError, "Error checking quota")
		return
	}
	if !ok {
		ctx.Error(http.StatusRequestEntityTooLarge, "Quota exceeded")
		return
	}

	// get upload file size
	fileRealTotalSize, contentLength := getUploadFileSize(ctx)

	// get artifact retention days
	expiredDays := setting.Actions.ArtifactRetentionDays
	if queryRetentionDays := ctx.Req.URL.Query().Get("retentionDays"); queryRetentionDays != "" {
		var err error
		expiredDays, err = strconv.ParseInt(queryRetentionDays, 10, 64)
		if err != nil {
			log.Error("Error parse retention days: %v", err)
			ctx.Error(http.StatusBadRequest, "Error parse retention days")
			return
		}
	}
	log.Debug("[artifact] upload chunk, name: %s, path: %s, size: %d, retention days: %d",
		artifactName, artifactPath, fileRealTotalSize, expiredDays)

	// create or get artifact with name and path
	artifact, err := actions.CreateArtifact(ctx, task, artifactName, artifactPath, expiredDays)
	if err != nil {
		log.Error("Error create or get artifact: %v", err)
		ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
		return
	}

	// save the chunk to storage; on success, the total size of all saved chunks is returned.
	// if the artifact is not gzipped when uploading, chunksTotalSize == fileRealTotalSize
	// if the artifact is gzipped when uploading, chunksTotalSize < fileRealTotalSize
	chunksTotalSize, err := saveUploadChunk(ar.fs, ctx, artifact, contentLength, runID)
	if err != nil {
		log.Error("Error save upload chunk: %v", err)
		ctx.Error(http.StatusInternalServerError, "Error save upload chunk")
		return
	}

	// update the artifact size if it is zero or does not match, overwriting the stored size
	if artifact.FileSize == 0 ||
		artifact.FileCompressedSize == 0 ||
		artifact.FileSize != fileRealTotalSize ||
		artifact.FileCompressedSize != chunksTotalSize {
		artifact.FileSize = fileRealTotalSize
		artifact.FileCompressedSize = chunksTotalSize
		artifact.ContentEncoding = ctx.Req.Header.Get("Content-Encoding")
		if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
			log.Error("Error update artifact: %v", err)
			ctx.Error(http.StatusInternalServerError, "Error update artifact")
			return
		}
		log.Debug("[artifact] update artifact size, artifact_id: %d, size: %d, compressed size: %d",
			artifact.ID, artifact.FileSize, artifact.FileCompressedSize)
	}

	ctx.JSON(http.StatusOK, map[string]string{
		"message": "success",
	})
}
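
// For illustration (hypothetical sizes): a 10 MiB file sent in two chunks
// arrives as two requests carrying
//
//	content-range: bytes 0-5242879/10485760
//	content-range: bytes 5242880-10485759/10485760
//
// and each request lands in saveUploadChunk once.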

// confirmUploadArtifact confirms the artifact upload.
// if all chunks are uploaded, merge them into one file.
func (ar artifactRoutes) confirmUploadArtifact(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}
	artifactName := ctx.Req.URL.Query().Get("artifactName")
	if artifactName == "" {
		log.Warn("Error artifact name is empty")
		ctx.Error(http.StatusBadRequest, "Error artifact name is empty")
		return
	}
	if err := mergeChunksForRun(ctx, ar.fs, runID, artifactName); err != nil {
		log.Error("Error merge chunks: %v", err)
		ctx.Error(http.StatusInternalServerError, "Error merge chunks")
		return
	}
	ctx.JSON(http.StatusOK, map[string]string{
		"message": "success",
	})
}

type (
	listArtifactsResponse struct {
		Count int64                       `json:"count"`
		Value []listArtifactsResponseItem `json:"value"`
	}
	listArtifactsResponseItem struct {
		Name                     string `json:"name"`
		FileContainerResourceURL string `json:"fileContainerResourceUrl"`
	}
)
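
// listArtifacts returns one entry per artifact name of the run; each entry's
// fileContainerResourceUrl points at the download_url route.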
func (ar artifactRoutes) listArtifacts(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}

	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{RunID: runID})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
		ctx.Error(http.StatusInternalServerError, err.Error())
		return
	}
	if len(artifacts) == 0 {
		log.Debug("[artifact] handleListArtifacts, no artifacts")
		ctx.Error(http.StatusNotFound)
		return
	}

	var (
		items  []listArtifactsResponseItem
		values = make(map[string]bool)
	)

	for _, art := range artifacts {
		if values[art.ArtifactName] {
			continue
		}
		artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(art.ArtifactName)))
		item := listArtifactsResponseItem{
			Name:                     art.ArtifactName,
			FileContainerResourceURL: ar.buildArtifactURL(runID, artifactHash, "download_url"),
		}
		items = append(items, item)
		values[art.ArtifactName] = true

		log.Debug("[artifact] handleListArtifacts, name: %s, url: %s", item.Name, item.FileContainerResourceURL)
	}

	respData := listArtifactsResponse{
		Count: int64(len(items)),
		Value: items,
	}
	ctx.JSON(http.StatusOK, respData)
}

type (
	downloadArtifactResponse struct {
		Value []downloadArtifactResponseItem `json:"value"`
	}
	downloadArtifactResponseItem struct {
		Path            string `json:"path"`
		ItemType        string `json:"itemType"`
		ContentLocation string `json:"contentLocation"`
	}
)

// getDownloadArtifactURL generates a download url for each artifact file
func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}

	itemPath := util.PathJoinRel(ctx.Req.URL.Query().Get("itemPath"))
	if !validateArtifactHash(ctx, itemPath) {
		return
	}

	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{
		RunID:        runID,
		ArtifactName: itemPath,
	})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
		ctx.Error(http.StatusInternalServerError, err.Error())
		return
	}
	if len(artifacts) == 0 {
		log.Debug("[artifact] getDownloadArtifactURL, no artifacts")
		ctx.Error(http.StatusNotFound)
		return
	}

	if itemPath != artifacts[0].ArtifactName {
		log.Error("Error mismatch artifact name, itemPath: %v, artifact: %v", itemPath, artifacts[0].ArtifactName)
		ctx.Error(http.StatusBadRequest, "Error mismatch artifact name")
		return
	}

	var items []downloadArtifactResponseItem
	for _, artifact := range artifacts {
		var downloadURL string
		// with serve direct enabled, try a pre-signed object-storage url
		// first so the client downloads without proxying through the server
		if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
			u, err := ar.fs.URL(artifact.StoragePath, artifact.ArtifactName, nil)
			if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
				log.Error("Error getting serve direct url: %v", err)
			}
			if u != nil {
				downloadURL = u.String()
			}
		}
		if downloadURL == "" {
			downloadURL = ar.buildArtifactURL(runID, strconv.FormatInt(artifact.ID, 10), "download")
		}
		item := downloadArtifactResponseItem{
			Path:            util.PathJoinRel(itemPath, artifact.ArtifactPath),
			ItemType:        "file",
			ContentLocation: downloadURL,
		}
		log.Debug("[artifact] getDownloadArtifactURL, path: %s, url: %s", item.Path, item.ContentLocation)
		items = append(items, item)
	}
	respData := downloadArtifactResponse{
		Value: items,
	}
	ctx.JSON(http.StatusOK, respData)
}

// downloadArtifact downloads artifact content
func (ar artifactRoutes) downloadArtifact(ctx *ArtifactContext) {
	_, runID, ok := validateRunID(ctx)
	if !ok {
		return
	}

	artifactID := ctx.ParamsInt64("artifact_id")
	artifact, exist, err := db.GetByID[actions.ActionArtifact](ctx, artifactID)
	if err != nil {
		log.Error("Error getting artifact: %v", err)
		ctx.Error(http.StatusInternalServerError, err.Error())
		return
	}
	if !exist {
		log.Error("artifact with ID %d does not exist", artifactID)
		ctx.Error(http.StatusNotFound, fmt.Sprintf("artifact with ID %d does not exist", artifactID))
		return
	}
	if artifact.RunID != runID {
		log.Error("Error mismatch runID and artifactID, task: %v, artifact: %v", runID, artifactID)
		ctx.Error(http.StatusBadRequest)
		return
	}

	fd, err := ar.fs.Open(artifact.StoragePath)
	if err != nil {
		log.Error("Error opening file: %v", err)
		ctx.Error(http.StatusInternalServerError, err.Error())
		return
	}
	defer fd.Close()

	// if artifact is compressed, set content-encoding header to gzip
	if artifact.ContentEncoding == "gzip" {
		ctx.Resp.Header().Set("Content-Encoding", "gzip")
	}
	log.Debug("[artifact] downloadArtifact, name: %s, path: %s, storage: %s, size: %d", artifact.ArtifactName, artifact.ArtifactPath, artifact.StoragePath, artifact.FileSize)
	ctx.ServeContent(fd, &context.ServeHeaderOptions{
		Filename:     artifact.ArtifactName,
		LastModified: artifact.CreatedUnix.AsLocalTime(),
	})
}