Alt Linux Apt-Rpm repository support for Forgejo packages. (#6351)
Co-authored-by: Aleksandr Gamzin alexgamz1119@gmail.com

Adds support for the Apt-Rpm registry of the Alt Linux distribution. Alt Linux uses RPM packages to store and distribute software to its users, but the logic of the Alt Linux package registry differs from the Red Hat package registry. This change adds support for the Alt Linux package registry.

## Checklist

The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first time contributors. There also are a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org).

### Tests

- I added test coverage for Go changes...
  - [ ] in their respective `*_test.go` for unit tests.
  - [x] in the `tests/integration` directory if it involves interactions with a live Forgejo server.
- I added test coverage for JavaScript changes...
  - [ ] in `web_src/js/*.test.js` if it can be unit tested.
  - [ ] in `tests/e2e/*.test.e2e.js` if it requires interactions with a live Forgejo server (see also the [developer guide for JavaScript testing](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/tests/e2e/README.md#end-to-end-tests)).

### Documentation

- [x] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change.
- [ ] I did not document these changes and I do not expect someone else to do it.

### Release notes

- [ ] I do not want this change to show in the release notes.
- [x] I want the title to show in the release notes with a link to this pull request.
- [ ] I want the content of the `release-notes/<pull request number>.md` to be used for the release notes instead of the title.

Co-authored-by: Aleksandr Gamzin <gamzin@altlinux.org>
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/6351
Reviewed-by: Earl Warren <earl-warren@noreply.codeberg.org>
Co-authored-by: Alex619829 <alex619829@noreply.codeberg.org>
Co-committed-by: Alex619829 <alex619829@noreply.codeberg.org>
This commit is contained in: parent a40284bec4, commit 7ae5376573
32 changed files with 2157 additions and 87 deletions
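As context for the diffs below: the new registry is driven over plain HTTP. A minimal client sketch of pushing a package to the /alt upload route added in routers/api/packages/api.go (instance URL, owner, group, token variable, and file name are hypothetical placeholders, not part of this commit):

package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Hypothetical package file and Forgejo instance, for illustration only.
	f, err := os.Open("hello-2.12-alt1.x86_64.rpm")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// PUT <instance>/api/packages/<owner>/alt/<group>/upload
	req, err := http.NewRequest(http.MethodPut,
		"https://forgejo.example.com/api/packages/alice/alt/sisyphus/upload", f)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("alice", os.Getenv("FORGEJO_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "201 Created" on success
}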
@@ -88,6 +88,8 @@ var migrations = []*Migration{
 	NewMigration("Add `purpose` column to `forgejo_auth_token` table", AddPurposeToForgejoAuthToken),
 	// v25 -> v26
 	NewMigration("Migrate `secret` column to store keying material", MigrateTwoFactorToKeying),
+	// v26 -> v27
+	NewMigration("Add `hash_blake2b` column to `package_blob` table", AddHashBlake2bToPackageBlob),
 }

 // GetCurrentDBVersion returns the current Forgejo database version.
14  models/forgejo_migrations/v26.go  Normal file
@@ -0,0 +1,14 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forgejo_migrations //nolint:revive
+
+import "xorm.io/xorm"
+
+func AddHashBlake2bToPackageBlob(x *xorm.Engine) error {
+	type PackageBlob struct {
+		ID          int64 `xorm:"pk autoincr"`
+		HashBlake2b string
+	}
+	return x.Sync(&PackageBlob{})
+}
29  models/packages/alt/search.go  Normal file
@@ -0,0 +1,29 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package alt
+
+import (
+	"context"
+
+	packages_model "code.gitea.io/gitea/models/packages"
+	rpm_module "code.gitea.io/gitea/modules/packages/rpm"
+)
+
+type PackageSearchOptions struct {
+	OwnerID      int64
+	GroupID      int64
+	Architecture string
+}
+
+// GetGroups gets all available groups
+func GetGroups(ctx context.Context, ownerID int64) ([]string, error) {
+	return packages_model.GetDistinctPropertyValues(
+		ctx,
+		packages_model.TypeAlt,
+		ownerID,
+		packages_model.PropertyTypeFile,
+		rpm_module.PropertyGroup,
+		nil,
+	)
+}
@@ -187,6 +187,8 @@ func GetPackageDescriptor(ctx context.Context, pv *PackageVersion) (*PackageDesc
 		metadata = &pypi.Metadata{}
 	case TypeRpm:
 		metadata = &rpm.VersionMetadata{}
+	case TypeAlt:
+		metadata = &rpm.VersionMetadata{}
 	case TypeRubyGems:
 		metadata = &rubygems.Metadata{}
 	case TypeSwift:
@@ -51,6 +51,7 @@ const (
 	TypePub      Type = "pub"
 	TypePyPI     Type = "pypi"
 	TypeRpm      Type = "rpm"
+	TypeAlt      Type = "alt"
 	TypeRubyGems Type = "rubygems"
 	TypeSwift    Type = "swift"
 	TypeVagrant  Type = "vagrant"
@@ -76,6 +77,7 @@ var TypeList = []Type{
 	TypePub,
 	TypePyPI,
 	TypeRpm,
+	TypeAlt,
 	TypeRubyGems,
 	TypeSwift,
 	TypeVagrant,
@@ -122,6 +124,8 @@ func (pt Type) Name() string {
 		return "PyPI"
 	case TypeRpm:
 		return "RPM"
+	case TypeAlt:
+		return "Alt"
 	case TypeRubyGems:
 		return "RubyGems"
 	case TypeSwift:
@@ -173,6 +177,8 @@ func (pt Type) SVGName() string {
 		return "gitea-python"
 	case TypeRpm:
 		return "gitea-rpm"
+	case TypeAlt:
+		return "gitea-alt"
 	case TypeRubyGems:
 		return "gitea-rubygems"
 	case TypeSwift:
@@ -34,6 +34,7 @@ type PackageBlob struct {
 	HashSHA1    string             `xorm:"hash_sha1 char(40) UNIQUE(sha1) INDEX NOT NULL"`
 	HashSHA256  string             `xorm:"hash_sha256 char(64) UNIQUE(sha256) INDEX NOT NULL"`
 	HashSHA512  string             `xorm:"hash_sha512 char(128) UNIQUE(sha512) INDEX NOT NULL"`
+	HashBlake2b string             `xorm:"hash_blake2b char(128) UNIQUE(blake2b) INDEX"`
 	CreatedUnix timeutil.TimeStamp `xorm:"created INDEX NOT NULL"`
 }

@@ -44,11 +45,12 @@ func GetOrInsertBlob(ctx context.Context, pb *PackageBlob) (*PackageBlob, bool,
 	existing := &PackageBlob{}

 	has, err := e.Where(builder.Eq{
-		"size":        pb.Size,
-		"hash_md5":    pb.HashMD5,
-		"hash_sha1":   pb.HashSHA1,
-		"hash_sha256": pb.HashSHA256,
-		"hash_sha512": pb.HashSHA512,
+		"size":         pb.Size,
+		"hash_md5":     pb.HashMD5,
+		"hash_sha1":    pb.HashSHA1,
+		"hash_sha256":  pb.HashSHA256,
+		"hash_sha512":  pb.HashSHA512,
+		"hash_blake2b": pb.HashBlake2b,
 	}).Get(existing)
 	if err != nil {
 		return nil, false, err
@@ -94,7 +94,7 @@ type FileMetadata struct {

 // ParsePackage Function that receives arch package archive data and returns it's metadata.
 func ParsePackage(r *packages.HashedBuffer) (*Package, error) {
-	md5, _, sha256, _ := r.Sums()
+	md5, _, sha256, _, _ := r.Sums()
 	_, err := r.Seek(0, io.SeekStart)
 	if err != nil {
 		return nil, err
@@ -75,7 +75,7 @@ func (b *HashedBuffer) Write(p []byte) (int, error) {
 	return b.combinedWriter.Write(p)
 }

-// Sums gets the MD5, SHA1, SHA256 and SHA512 checksums of the data
-func (b *HashedBuffer) Sums() (hashMD5, hashSHA1, hashSHA256, hashSHA512 []byte) {
+// Sums gets the MD5, SHA1, SHA256, SHA512 and BLAKE2B checksums of the data
+func (b *HashedBuffer) Sums() (hashMD5, hashSHA1, hashSHA256, hashSHA512, hashBlake2b []byte) {
 	return b.hash.Sums()
 }
@@ -21,9 +21,10 @@ func TestHashedBuffer(t *testing.T) {
 		HashSHA1    string
 		HashSHA256  string
 		HashSHA512  string
+		hashBlake2b string
 	}{
-		{5, "test", "098f6bcd4621d373cade4e832627b4f6", "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", "ee26b0dd4af7e749aa1a8ee3c10ae9923f618980772e473f8819a5d4940e0db27ac185f8a0e1d5f84f88bc887fd67b143732c304cc5fa9ad8e6f57f50028a8ff"},
-		{5, "testtest", "05a671c66aefea124cc08b76ea6d30bb", "51abb9636078defbf888d8457a7c76f85c8f114c", "37268335dd6931045bdcdf92623ff819a64244b53d0e746d438797349d4da578", "125d6d03b32c84d492747f79cf0bf6e179d287f341384eb5d6d3197525ad6be8e6df0116032935698f99a09e265073d1d6c32c274591bf1d0a20ad67cba921bc"},
+		{5, "test", "098f6bcd4621d373cade4e832627b4f6", "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", "ee26b0dd4af7e749aa1a8ee3c10ae9923f618980772e473f8819a5d4940e0db27ac185f8a0e1d5f84f88bc887fd67b143732c304cc5fa9ad8e6f57f50028a8ff", "a71079d42853dea26e453004338670a53814b78137ffbed07603a41d76a483aa9bc33b582f77d30a65e6f29a896c0411f38312e1d66e0bf16386c86a89bea572"},
+		{5, "testtest", "05a671c66aefea124cc08b76ea6d30bb", "51abb9636078defbf888d8457a7c76f85c8f114c", "37268335dd6931045bdcdf92623ff819a64244b53d0e746d438797349d4da578", "125d6d03b32c84d492747f79cf0bf6e179d287f341384eb5d6d3197525ad6be8e6df0116032935698f99a09e265073d1d6c32c274591bf1d0a20ad67cba921bc", "372a53b95f46e775b973031e40b844f24389657019f7b7540a9f0496f4ead4a2e4b050909664611fb0f4b7c7e92c3c04c84787be7f6b8edf7bf6bc31856b6c76"},
 	}

 	for _, c := range cases {
@@ -36,11 +37,12 @@ func TestHashedBuffer(t *testing.T) {
 		require.NoError(t, err)
 		assert.Equal(t, c.Data, string(data))

-		hashMD5, hashSHA1, hashSHA256, hashSHA512 := buf.Sums()
+		hashMD5, hashSHA1, hashSHA256, hashSHA512, hashBlake2b := buf.Sums()
 		assert.Equal(t, c.HashMD5, hex.EncodeToString(hashMD5))
 		assert.Equal(t, c.HashSHA1, hex.EncodeToString(hashSHA1))
 		assert.Equal(t, c.HashSHA256, hex.EncodeToString(hashSHA256))
 		assert.Equal(t, c.HashSHA512, hex.EncodeToString(hashSHA512))
+		assert.Equal(t, c.hashBlake2b, hex.EncodeToString(hashBlake2b))

 		require.NoError(t, buf.Close())
 	}
@@ -12,28 +12,32 @@ import (
 	"errors"
 	"hash"
 	"io"
+
+	"golang.org/x/crypto/blake2b"
 )

 const (
-	marshaledSizeMD5    = 92
-	marshaledSizeSHA1   = 96
-	marshaledSizeSHA256 = 108
-	marshaledSizeSHA512 = 204
+	marshaledSizeMD5     = 92
+	marshaledSizeSHA1    = 96
+	marshaledSizeSHA256  = 108
+	marshaledSizeSHA512  = 204
+	marshaledSizeBlake2b = 213

-	marshaledSize = marshaledSizeMD5 + marshaledSizeSHA1 + marshaledSizeSHA256 + marshaledSizeSHA512
+	marshaledSize = marshaledSizeMD5 + marshaledSizeSHA1 + marshaledSizeSHA256 + marshaledSizeSHA512 + marshaledSizeBlake2b
 )

 // HashSummer provide a Sums method
 type HashSummer interface {
-	Sums() (hashMD5, hashSHA1, hashSHA256, hashSHA512 []byte)
+	Sums() (hashMD5, hashSHA1, hashSHA256, hashSHA512, hashBlake2b []byte)
 }

 // MultiHasher calculates multiple checksums
 type MultiHasher struct {
-	md5    hash.Hash
-	sha1   hash.Hash
-	sha256 hash.Hash
-	sha512 hash.Hash
+	md5     hash.Hash
+	sha1    hash.Hash
+	sha256  hash.Hash
+	sha512  hash.Hash
+	blake2b hash.Hash

 	combinedWriter io.Writer
 }
@@ -44,14 +48,16 @@ func NewMultiHasher() *MultiHasher {
 	sha1 := sha1.New()
 	sha256 := sha256.New()
 	sha512 := sha512.New()
+	blake2b, _ := blake2b.New512(nil)

-	combinedWriter := io.MultiWriter(md5, sha1, sha256, sha512)
+	combinedWriter := io.MultiWriter(md5, sha1, sha256, sha512, blake2b)

 	return &MultiHasher{
 		md5,
 		sha1,
 		sha256,
 		sha512,
+		blake2b,
 		combinedWriter,
 	}
 }
@@ -74,12 +80,17 @@ func (h *MultiHasher) MarshalBinary() ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
+	blake2bBytes, err := h.blake2b.(encoding.BinaryMarshaler).MarshalBinary()
+	if err != nil {
+		return nil, err
+	}

 	b := make([]byte, 0, marshaledSize)
 	b = append(b, md5Bytes...)
 	b = append(b, sha1Bytes...)
 	b = append(b, sha256Bytes...)
 	b = append(b, sha512Bytes...)
+	b = append(b, blake2bBytes...)
 	return b, nil
 }

@@ -104,7 +115,12 @@ func (h *MultiHasher) UnmarshalBinary(b []byte) error {
 	}

 	b = b[marshaledSizeSHA256:]
-	return h.sha512.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[:marshaledSizeSHA512])
+	if err := h.sha512.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[:marshaledSizeSHA512]); err != nil {
+		return err
+	}
+
+	b = b[marshaledSizeSHA512:]
+	return h.blake2b.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[:marshaledSizeBlake2b])
 }

 // Write implements io.Writer
@@ -113,10 +129,11 @@ func (h *MultiHasher) Write(p []byte) (int, error) {
 }

 // Sums gets the MD5, SHA1, SHA256 and SHA512 checksums of the data
-func (h *MultiHasher) Sums() (hashMD5, hashSHA1, hashSHA256, hashSHA512 []byte) {
+func (h *MultiHasher) Sums() (hashMD5, hashSHA1, hashSHA256, hashSHA512, hashBlake2b []byte) {
 	hashMD5 = h.md5.Sum(nil)
 	hashSHA1 = h.sha1.Sum(nil)
 	hashSHA256 = h.sha256.Sum(nil)
 	hashSHA512 = h.sha512.Sum(nil)
-	return hashMD5, hashSHA1, hashSHA256, hashSHA512
+	hashBlake2b = h.blake2b.Sum(nil)
+	return hashMD5, hashSHA1, hashSHA256, hashSHA512, hashBlake2b
 }
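The MultiHasher changes above extend every call site by one return value. A minimal usage sketch, assuming only the names visible in this diff (NewMultiHasher, Write, and the five-value Sums from code.gitea.io/gitea/modules/packages):

package main

import (
	"encoding/hex"
	"fmt"

	packages_module "code.gitea.io/gitea/modules/packages"
)

func main() {
	h := packages_module.NewMultiHasher()
	_, _ = h.Write([]byte("gitea"))

	// Sums now returns five digests; the fifth is the new BLAKE2b-512 hash.
	_, _, _, _, blake2b := h.Sums()
	fmt.Println(hex.EncodeToString(blake2b))
}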
@@ -12,10 +12,11 @@ import (
 )

 const (
-	expectedMD5    = "e3bef03c5f3b7f6b3ab3e3053ed71e9c"
-	expectedSHA1   = "060b3b99f88e96085b4a68e095bc9e3d1d91e1bc"
-	expectedSHA256 = "6ccce4863b70f258d691f59609d31b4502e1ba5199942d3bc5d35d17a4ce771d"
-	expectedSHA512 = "7f70e439ba8c52025c1f06cdf6ae443c4b8ed2e90059cdb9bbbf8adf80846f185a24acca9245b128b226d61753b0d7ed46580a69c8999eeff3bc13a4d0bd816c"
+	expectedMD5     = "e3bef03c5f3b7f6b3ab3e3053ed71e9c"
+	expectedSHA1    = "060b3b99f88e96085b4a68e095bc9e3d1d91e1bc"
+	expectedSHA256  = "6ccce4863b70f258d691f59609d31b4502e1ba5199942d3bc5d35d17a4ce771d"
+	expectedSHA512  = "7f70e439ba8c52025c1f06cdf6ae443c4b8ed2e90059cdb9bbbf8adf80846f185a24acca9245b128b226d61753b0d7ed46580a69c8999eeff3bc13a4d0bd816c"
+	expectedBlake2b = "b3c3ad15c7e6cca543d651add9427727ffb525120eb23264ee35f16f408a369b599d4404a52d29f642fc0d869f9b55581b60e4e8b9b74997182705d3dcb01edb"
 )

 func TestMultiHasherSums(t *testing.T) {
@@ -23,12 +24,13 @@ func TestMultiHasherSums(t *testing.T) {
 		h := NewMultiHasher()
 		h.Write([]byte("gitea"))

-		hashMD5, hashSHA1, hashSHA256, hashSHA512 := h.Sums()
+		hashMD5, hashSHA1, hashSHA256, hashSHA512, hashBlake2b := h.Sums()

 		assert.Equal(t, expectedMD5, hex.EncodeToString(hashMD5))
 		assert.Equal(t, expectedSHA1, hex.EncodeToString(hashSHA1))
 		assert.Equal(t, expectedSHA256, hex.EncodeToString(hashSHA256))
 		assert.Equal(t, expectedSHA512, hex.EncodeToString(hashSHA512))
+		assert.Equal(t, expectedBlake2b, hex.EncodeToString(hashBlake2b))
 	})

 	t.Run("State", func(t *testing.T) {
@@ -44,11 +46,12 @@ func TestMultiHasherSums(t *testing.T) {

 		h2.Write([]byte("ea"))

-		hashMD5, hashSHA1, hashSHA256, hashSHA512 := h2.Sums()
+		hashMD5, hashSHA1, hashSHA256, hashSHA512, hashBlake2b := h2.Sums()

 		assert.Equal(t, expectedMD5, hex.EncodeToString(hashMD5))
 		assert.Equal(t, expectedSHA1, hex.EncodeToString(hashSHA1))
 		assert.Equal(t, expectedSHA256, hex.EncodeToString(hashSHA256))
 		assert.Equal(t, expectedSHA512, hex.EncodeToString(hashSHA512))
+		assert.Equal(t, expectedBlake2b, hex.EncodeToString(hashBlake2b))
 	})
 }
@@ -78,11 +78,12 @@ type FileMetadata struct {
 }

 type Entry struct {
-	Name    string `json:"name" xml:"name,attr"`
-	Flags   string `json:"flags,omitempty" xml:"flags,attr,omitempty"`
-	Version string `json:"version,omitempty" xml:"ver,attr,omitempty"`
-	Epoch   string `json:"epoch,omitempty" xml:"epoch,attr,omitempty"`
-	Release string `json:"release,omitempty" xml:"rel,attr,omitempty"`
+	Name     string `json:"name" xml:"name,attr"`
+	Flags    string `json:"flags,omitempty" xml:"flags,attr,omitempty"`
+	AltFlags uint32 `json:"alt_flags,omitempty" xml:"alt_flags,attr,omitempty"`
+	Version  string `json:"version,omitempty" xml:"ver,attr,omitempty"`
+	Epoch    string `json:"epoch,omitempty" xml:"epoch,attr,omitempty"`
+	Release  string `json:"release,omitempty" xml:"rel,attr,omitempty"`
 }

 type File struct {
@@ -98,7 +99,7 @@ type Changelog struct {
 }

 // ParsePackage parses the RPM package file
-func ParsePackage(r io.Reader) (*Package, error) {
+func ParsePackage(r io.Reader, repoType string) (*Package, error) {
 	rpm, err := rpmutils.ReadRpm(r)
 	if err != nil {
 		return nil, err
@@ -138,10 +139,10 @@ func ParsePackage(r io.Reader) (*Package, error) {
 			InstalledSize: getUInt64(rpm.Header, rpmutils.SIZE),
 			ArchiveSize:   getUInt64(rpm.Header, rpmutils.SIG_PAYLOADSIZE),

-			Provides:  getEntries(rpm.Header, rpmutils.PROVIDENAME, rpmutils.PROVIDEVERSION, rpmutils.PROVIDEFLAGS),
-			Requires:  getEntries(rpm.Header, rpmutils.REQUIRENAME, rpmutils.REQUIREVERSION, rpmutils.REQUIREFLAGS),
-			Conflicts: getEntries(rpm.Header, rpmutils.CONFLICTNAME, rpmutils.CONFLICTVERSION, rpmutils.CONFLICTFLAGS),
-			Obsoletes: getEntries(rpm.Header, rpmutils.OBSOLETENAME, rpmutils.OBSOLETEVERSION, rpmutils.OBSOLETEFLAGS),
+			Provides:  getEntries(rpm.Header, rpmutils.PROVIDENAME, rpmutils.PROVIDEVERSION, rpmutils.PROVIDEFLAGS, repoType),
+			Requires:  getEntries(rpm.Header, rpmutils.REQUIRENAME, rpmutils.REQUIREVERSION, rpmutils.REQUIREFLAGS, repoType),
+			Conflicts: getEntries(rpm.Header, rpmutils.CONFLICTNAME, rpmutils.CONFLICTVERSION, rpmutils.CONFLICTFLAGS, repoType),
+			Obsoletes: getEntries(rpm.Header, rpmutils.OBSOLETENAME, rpmutils.OBSOLETEVERSION, rpmutils.OBSOLETEFLAGS, repoType),
 			Files:      getFiles(rpm.Header),
 			Changelogs: getChangelogs(rpm.Header),
 		},
@@ -170,7 +171,7 @@ func getUInt64(h *rpmutils.RpmHeader, tag int) uint64 {
 	return values[0]
 }

-func getEntries(h *rpmutils.RpmHeader, namesTag, versionsTag, flagsTag int) []*Entry {
+func getEntries(h *rpmutils.RpmHeader, namesTag, versionsTag, flagsTag int, repoType string) []*Entry {
 	names, err := h.GetStrings(namesTag)
 	if err != nil || len(names) == 0 {
 		return nil
@@ -188,43 +189,54 @@ func getEntries(h *rpmutils.RpmHeader, namesTag, versionsTag, flagsTag int) []*E
 	}

 	entries := make([]*Entry, 0, len(names))
-	for i := range names {
-		e := &Entry{
-			Name: names[i],
-		}
-
-		flags := flags[i]
-		if (flags&rpmutils.RPMSENSE_GREATER) != 0 && (flags&rpmutils.RPMSENSE_EQUAL) != 0 {
-			e.Flags = "GE"
-		} else if (flags&rpmutils.RPMSENSE_LESS) != 0 && (flags&rpmutils.RPMSENSE_EQUAL) != 0 {
-			e.Flags = "LE"
-		} else if (flags & rpmutils.RPMSENSE_GREATER) != 0 {
-			e.Flags = "GT"
-		} else if (flags & rpmutils.RPMSENSE_LESS) != 0 {
-			e.Flags = "LT"
-		} else if (flags & rpmutils.RPMSENSE_EQUAL) != 0 {
-			e.Flags = "EQ"
-		}
-
-		version := versions[i]
-		if version != "" {
-			parts := strings.Split(version, "-")
-
-			versionParts := strings.Split(parts[0], ":")
-			if len(versionParts) == 2 {
-				e.Version = versionParts[1]
-				e.Epoch = versionParts[0]
-			} else {
-				e.Version = versionParts[0]
-				e.Epoch = "0"
-			}
-
-			if len(parts) > 1 {
-				e.Release = parts[1]
-			}
-		}
-
-		entries = append(entries, e)
-	}
+	switch repoType {
+	case "rpm":
+		for i := range names {
+			e := &Entry{
+				Name: names[i],
+			}
+
+			flags := flags[i]
+			if (flags&rpmutils.RPMSENSE_GREATER) != 0 && (flags&rpmutils.RPMSENSE_EQUAL) != 0 {
+				e.Flags = "GE"
+			} else if (flags&rpmutils.RPMSENSE_LESS) != 0 && (flags&rpmutils.RPMSENSE_EQUAL) != 0 {
+				e.Flags = "LE"
+			} else if (flags & rpmutils.RPMSENSE_GREATER) != 0 {
+				e.Flags = "GT"
+			} else if (flags & rpmutils.RPMSENSE_LESS) != 0 {
+				e.Flags = "LT"
+			} else if (flags & rpmutils.RPMSENSE_EQUAL) != 0 {
+				e.Flags = "EQ"
+			}
+
+			version := versions[i]
+			if version != "" {
+				parts := strings.Split(version, "-")
+
+				versionParts := strings.Split(parts[0], ":")
+				if len(versionParts) == 2 {
+					e.Version = versionParts[1]
+					e.Epoch = versionParts[0]
+				} else {
+					e.Version = versionParts[0]
+					e.Epoch = "0"
+				}
+
+				if len(parts) > 1 {
+					e.Release = parts[1]
+				}
+			}
+			entries = append(entries, e)
+		}
+	case "alt":
+		for i := range names {
+			e := &Entry{
+				AltFlags: uint32(flags[i]),
+			}
+			e.Version = versions[i]
+			entries = append(entries, e)
+		}
+	}
 	return entries
 }
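To make the two getEntries branches concrete, a standalone sketch (the Entry type is trimmed to the fields shown above; the sample values are invented): the "rpm" branch decodes RPMSENSE bits into comparison strings and splits epoch:version-release, while the "alt" branch stores only the raw flag word and the version string verbatim.

package main

import "fmt"

// Entry mirrors the struct extended above, trimmed to the relevant fields.
type Entry struct {
	Name     string
	Flags    string
	AltFlags uint32
	Version  string
	Epoch    string
	Release  string
}

func main() {
	// "rpm" branch output shape: decoded comparison flag, split version.
	rpmStyle := Entry{Name: "libfoo", Flags: "GE", Epoch: "0", Version: "1.2", Release: "1"}

	// "alt" branch output shape: raw flag word, verbatim version string.
	altStyle := Entry{AltFlags: 12, Version: "1.2-alt1"}

	fmt.Printf("%+v\n%+v\n", rpmStyle, altStyle)
}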
@@ -48,7 +48,7 @@ Mu0UFYgZ/bYnuvn/vz4wtCz8qMwsHUvP0PX3tbYFUctAPdrY6tiiDtcCddDECahx7SuVNP5dpmb5
 	zr, err := gzip.NewReader(bytes.NewReader(rpmPackageContent))
 	require.NoError(t, err)

-	p, err := ParsePackage(zr)
+	p, err := ParsePackage(zr, "rpm")
 	assert.NotNil(t, p)
 	require.NoError(t, err)
@@ -42,6 +42,7 @@ var (
 	LimitSizePub      int64
 	LimitSizePyPI     int64
 	LimitSizeRpm      int64
+	LimitSizeAlt      int64
 	LimitSizeRubyGems int64
 	LimitSizeSwift    int64
 	LimitSizeVagrant  int64
@@ -106,6 +107,7 @@ func loadPackagesFrom(rootCfg ConfigProvider) (err error) {
 	Packages.LimitSizeSwift = mustBytes(sec, "LIMIT_SIZE_SWIFT")
 	Packages.LimitSizeVagrant = mustBytes(sec, "LIMIT_SIZE_VAGRANT")
 	Packages.DefaultRPMSignEnabled = sec.Key("DEFAULT_RPM_SIGN_ENABLED").MustBool(false)
+	Packages.LimitSizeAlt = mustBytes(sec, "LIMIT_SIZE_ALT")
 	return nil
 }

@@ -3724,6 +3724,13 @@ rpm.install = To install the package, run the following command:
 rpm.repository = Repository info
 rpm.repository.architectures = Architectures
 rpm.repository.multiple_groups = This package is available in multiple groups.
+alt.registry = Setup this registry from the command line:
+alt.registry.install = To install the package, run the following command:
+alt.install = Install package
+alt.setup = Add a repository to the list of connected repositories (choose the necessary architecture instead of '_arch_'):
+alt.repository = Repository Info
+alt.repository.architectures = Architectures
+alt.repository.multiple_groups = This package is available in multiple groups.
 rubygems.install = To install the package using gem, run the following command:
 rubygems.install2 = or add it to the Gemfile:
 rubygems.dependencies.runtime = Runtime dependencies
1  public/assets/img/svg/gitea-alt.svg  generated  Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 12 KiB
260  routers/api/packages/alt/alt.go  Normal file
@@ -0,0 +1,260 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package alt
+
+import (
+	stdctx "context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"code.gitea.io/gitea/models/db"
+	packages_model "code.gitea.io/gitea/models/packages"
+	"code.gitea.io/gitea/modules/json"
+	packages_module "code.gitea.io/gitea/modules/packages"
+	rpm_module "code.gitea.io/gitea/modules/packages/rpm"
+	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/util"
+	"code.gitea.io/gitea/routers/api/packages/helper"
+	"code.gitea.io/gitea/services/context"
+	notify_service "code.gitea.io/gitea/services/notify"
+	packages_service "code.gitea.io/gitea/services/packages"
+	alt_service "code.gitea.io/gitea/services/packages/alt"
+)
+
+func apiError(ctx *context.Context, status int, obj any) {
+	helper.LogAndProcessError(ctx, status, obj, func(message string) {
+		ctx.PlainText(status, message)
+	})
+}
+
+func GetRepositoryConfig(ctx *context.Context) {
+	group := ctx.Params("group")
+
+	var groupParts []string
+	if group != "" {
+		groupParts = strings.Split(group, "/")
+	}
+
+	url := fmt.Sprintf("%sapi/packages/%s/alt", setting.AppURL, ctx.Package.Owner.Name)
+
+	ctx.PlainText(http.StatusOK, `[gitea-`+strings.Join(append([]string{ctx.Package.Owner.LowerName}, groupParts...), "-")+`]
+name=`+strings.Join(append([]string{ctx.Package.Owner.Name, setting.AppName}, groupParts...), " - ")+`
+baseurl=`+strings.Join(append([]string{url}, groupParts...), "/")+`
+enabled=1`)
+}
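For a hypothetical owner alice with group sisyphus on an instance whose AppName is Forgejo, the handler above would answer with a config along these lines (an illustration derived from the template, not captured output):

[gitea-alice-sisyphus]
name=alice - Forgejo - sisyphus
baseurl=https://forgejo.example.com/api/packages/alice/alt/sisyphus
enabled=1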
+
+// Gets a pre-generated repository metadata file
+func GetRepositoryFile(ctx *context.Context, arch string) {
+	pv, err := alt_service.GetOrCreateRepositoryVersion(ctx, ctx.Package.Owner.ID)
+	if err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+
+	s, u, pf, err := packages_service.GetFileStreamByPackageVersion(
+		ctx,
+		pv,
+		&packages_service.PackageFileInfo{
+			Filename:     ctx.Params("filename"),
+			CompositeKey: arch + "__" + ctx.Params("group"),
+		},
+	)
+	if err != nil {
+		if errors.Is(err, util.ErrNotExist) {
+			apiError(ctx, http.StatusNotFound, err)
+		} else {
+			apiError(ctx, http.StatusInternalServerError, err)
+		}
+		return
+	}
+
+	helper.ServePackageFile(ctx, s, u, pf)
+}
+
+func UploadPackageFile(ctx *context.Context) {
+	upload, needToClose, err := ctx.UploadStream()
+	if err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+	if needToClose {
+		defer upload.Close()
+	}
+
+	buf, err := packages_module.CreateHashedBufferFromReader(upload)
+	if err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+	defer buf.Close()
+
+	pck, err := rpm_module.ParsePackage(buf, "alt")
+	if err != nil {
+		if errors.Is(err, util.ErrInvalidArgument) {
+			apiError(ctx, http.StatusBadRequest, err)
+		} else {
+			apiError(ctx, http.StatusInternalServerError, err)
+		}
+		return
+	}
+
+	if _, err := buf.Seek(0, io.SeekStart); err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+
+	fileMetadataRaw, err := json.Marshal(pck.FileMetadata)
+	if err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+	group := ctx.Params("group")
+	_, _, err = packages_service.CreatePackageOrAddFileToExisting(
+		ctx,
+		&packages_service.PackageCreationInfo{
+			PackageInfo: packages_service.PackageInfo{
+				Owner:       ctx.Package.Owner,
+				PackageType: packages_model.TypeAlt,
+				Name:        pck.Name,
+				Version:     pck.Version,
+			},
+			Creator:  ctx.Doer,
+			Metadata: pck.VersionMetadata,
+		},
+		&packages_service.PackageFileCreationInfo{
+			PackageFileInfo: packages_service.PackageFileInfo{
+				Filename:     fmt.Sprintf("%s-%s.%s.rpm", pck.Name, pck.Version, pck.FileMetadata.Architecture),
+				CompositeKey: group,
+			},
+			Creator: ctx.Doer,
+			Data:    buf,
+			IsLead:  true,
+			Properties: map[string]string{
+				rpm_module.PropertyGroup:        group,
+				rpm_module.PropertyArchitecture: pck.FileMetadata.Architecture,
+				rpm_module.PropertyMetadata:     string(fileMetadataRaw),
+			},
+		},
+	)
+	if err != nil {
+		switch err {
+		case packages_model.ErrDuplicatePackageVersion, packages_model.ErrDuplicatePackageFile:
+			apiError(ctx, http.StatusConflict, err)
+		case packages_service.ErrQuotaTotalCount, packages_service.ErrQuotaTypeSize, packages_service.ErrQuotaTotalSize:
+			apiError(ctx, http.StatusForbidden, err)
+		default:
+			apiError(ctx, http.StatusInternalServerError, err)
+		}
+		return
+	}
+
+	if err := alt_service.BuildSpecificRepositoryFiles(ctx, ctx.Package.Owner.ID, group); err != nil {
+		apiError(ctx, http.StatusInternalServerError, err)
+		return
+	}
+
+	ctx.Status(http.StatusCreated)
+}
+
+func DownloadPackageFile(ctx *context.Context) {
+	name := ctx.Params("name")
+	version := ctx.Params("version")
+
+	s, u, pf, err := packages_service.GetFileStreamByPackageNameAndVersion(
+		ctx,
+		&packages_service.PackageInfo{
+			Owner:       ctx.Package.Owner,
+			PackageType: packages_model.TypeAlt,
+			Name:        name,
+			Version:     version,
+		},
+		&packages_service.PackageFileInfo{
+			Filename:     fmt.Sprintf("%s-%s.%s.rpm", name, version, ctx.Params("architecture")),
+			CompositeKey: ctx.Params("group"),
+		},
+	)
+	if err != nil {
+		if errors.Is(err, util.ErrNotExist) {
+			apiError(ctx, http.StatusNotFound, err)
+		} else {
+			apiError(ctx, http.StatusInternalServerError, err)
+		}
+		return
+	}
+
+	helper.ServePackageFile(ctx, s, u, pf)
+}
+
+func DeletePackageFile(webctx *context.Context) {
+	group := webctx.Params("group")
+	name := webctx.Params("name")
+	version := webctx.Params("version")
+	architecture := webctx.Params("architecture")
+
+	var pd *packages_model.PackageDescriptor
+
+	err := db.WithTx(webctx, func(ctx stdctx.Context) error {
+		pv, err := packages_model.GetVersionByNameAndVersion(ctx,
+			webctx.Package.Owner.ID,
+			packages_model.TypeAlt,
+			name,
+			version,
+		)
+		if err != nil {
+			return err
+		}
+
+		pf, err := packages_model.GetFileForVersionByName(
+			ctx,
+			pv.ID,
+			fmt.Sprintf("%s-%s.%s.rpm", name, version, architecture),
+			group,
+		)
+		if err != nil {
+			return err
+		}
+
+		if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+			return err
+		}
+
+		has, err := packages_model.HasVersionFileReferences(ctx, pv.ID)
+		if err != nil {
+			return err
+		}
+		if !has {
+			pd, err = packages_model.GetPackageDescriptor(ctx, pv)
+			if err != nil {
+				return err
+			}
+
+			if err := packages_service.DeletePackageVersionAndReferences(ctx, pv); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		if errors.Is(err, util.ErrNotExist) {
+			apiError(webctx, http.StatusNotFound, err)
+		} else {
+			apiError(webctx, http.StatusInternalServerError, err)
+		}
+		return
+	}
+
+	if pd != nil {
+		notify_service.PackageDelete(webctx, webctx.Doer, pd)
+	}
+
+	if err := alt_service.BuildSpecificRepositoryFiles(webctx, webctx.Package.Owner.ID, group); err != nil {
+		apiError(webctx, http.StatusInternalServerError, err)
+		return
+	}
+
+	webctx.Status(http.StatusNoContent)
+}
@@ -15,6 +15,7 @@ import (
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/web"
 	"code.gitea.io/gitea/routers/api/packages/alpine"
+	"code.gitea.io/gitea/routers/api/packages/alt"
 	"code.gitea.io/gitea/routers/api/packages/arch"
 	"code.gitea.io/gitea/routers/api/packages/cargo"
 	"code.gitea.io/gitea/routers/api/packages/chef"
@@ -624,6 +625,73 @@ func CommonRoutes() *web.Route {
 			ctx.Status(http.StatusNotFound)
 		})
 	}, reqPackageAccess(perm.AccessModeRead))
+	r.Group("/alt", func() {
+		var (
+			baseURLPattern  = regexp.MustCompile(`\A(.*?)\.repo\z`)
+			uploadPattern   = regexp.MustCompile(`\A(.*?)/upload\z`)
+			baseRepoPattern = regexp.MustCompile(`(\S+)\.repo/(\S+)\/base/(\S+)`)
+			rpmsRepoPattern = regexp.MustCompile(`(\S+)\.repo/(\S+)\.(\S+)\/([a-zA-Z0-9_-]+)-([\d.]+-[a-zA-Z0-9_-]+)\.(\S+)\.rpm`)
+		)
+
+		r.Methods("HEAD,GET,PUT,DELETE", "*", func(ctx *context.Context) {
+			path := ctx.Params("*")
+			isGetHead := ctx.Req.Method == "HEAD" || ctx.Req.Method == "GET"
+			isPut := ctx.Req.Method == "PUT"
+			isDelete := ctx.Req.Method == "DELETE"
+
+			m := baseURLPattern.FindStringSubmatch(path)
+			if len(m) == 2 && isGetHead {
+				ctx.SetParams("group", strings.Trim(m[1], "/"))
+				alt.GetRepositoryConfig(ctx)
+				return
+			}
+
+			m = baseRepoPattern.FindStringSubmatch(path)
+			if len(m) == 4 {
+				if strings.Trim(m[1], "/") != "alt" {
+					ctx.SetParams("group", strings.Trim(m[1], "/"))
+				}
+				ctx.SetParams("filename", m[3])
+				if isGetHead {
+					alt.GetRepositoryFile(ctx, m[2])
+				}
+				return
+			}
+
+			m = uploadPattern.FindStringSubmatch(path)
+			if len(m) == 2 && isPut {
+				reqPackageAccess(perm.AccessModeWrite)(ctx)
+				if ctx.Written() {
+					return
+				}
+				ctx.SetParams("group", strings.Trim(m[1], "/"))
+				alt.UploadPackageFile(ctx)
+				return
+			}
+
+			m = rpmsRepoPattern.FindStringSubmatch(path)
+			if len(m) == 7 && (isGetHead || isDelete) {
+				if strings.Trim(m[1], "/") != "alt" {
+					ctx.SetParams("group", strings.Trim(m[1], "/"))
+				}
+				ctx.SetParams("name", m[4])
+				ctx.SetParams("version", m[5])
+				ctx.SetParams("architecture", m[6])
+				if isGetHead {
+					alt.DownloadPackageFile(ctx)
+				} else {
+					reqPackageAccess(perm.AccessModeWrite)(ctx)
+					if ctx.Written() {
+						return
+					}
+					alt.DeletePackageFile(ctx)
+				}
+				return
+			}
+
+			ctx.Status(http.StatusNotFound)
+		})
+	}, reqPackageAccess(perm.AccessModeRead))
 	r.Group("/rubygems", func() {
 		r.Get("/specs.4.8.gz", rubygems.EnumeratePackages)
 		r.Get("/latest_specs.4.8.gz", rubygems.EnumeratePackagesLatest)
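The wildcard handler above dispatches on regular expressions instead of fixed routes. Plausible example paths relative to /api/packages/<owner>/alt (group, architecture, and package names are invented; the exact match depends on the patterns above):

GET    sisyphus.repo                                              -> GetRepositoryConfig (group "sisyphus")
GET    sisyphus.repo/x86_64/base/pkglist.classic.xz               -> GetRepositoryFile   (arch "x86_64")
PUT    sisyphus/upload                                            -> UploadPackageFile   (write access required)
GET    sisyphus.repo/x86_64.RPMS.classic/hello-2.12-alt1.x86_64.rpm -> DownloadPackageFile
DELETE sisyphus.repo/x86_64.RPMS.classic/hello-2.12-alt1.x86_64.rpm -> DeletePackageFile  (write access required)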
@@ -193,7 +193,7 @@ func deleteBlob(ctx context.Context, ownerID int64, image, digest string) error
 }

 func digestFromHashSummer(h packages_module.HashSummer) string {
-	_, _, hashSHA256, _ := h.Sums()
+	_, _, hashSHA256, _, _ := h.Sums()
 	return "sha256:" + hex.EncodeToString(hashSHA256)
 }

@@ -121,7 +121,7 @@ func UploadPackageFile(ctx *context.Context) {
 	}
 	defer buf.Close()

-	_, _, hashSHA256, _ := buf.Sums()
+	_, _, hashSHA256, _, _ := buf.Sums()

 	if !strings.EqualFold(ctx.Req.FormValue("sha256_digest"), hex.EncodeToString(hashSHA256)) {
 		apiError(ctx, http.StatusBadRequest, "hash mismatch")
@@ -149,7 +149,7 @@ func UploadPackageFile(ctx *context.Context) {
 		buf = signedBuf
 	}

-	pck, err := rpm_module.ParsePackage(buf)
+	pck, err := rpm_module.ParsePackage(buf, "rpm")
 	if err != nil {
 		if errors.Is(err, util.ErrInvalidArgument) {
 			apiError(ctx, http.StatusBadRequest, err)
@@ -235,7 +235,7 @@ func ViewPackageVersion(ctx *context.Context) {
 		ctx.Data["Distributions"] = util.Sorted(distributions.Values())
 		ctx.Data["Components"] = util.Sorted(components.Values())
 		ctx.Data["Architectures"] = util.Sorted(architectures.Values())
-	case packages_model.TypeRpm:
+	case packages_model.TypeRpm, packages_model.TypeAlt:
 		groups := make(container.Set[string])
 		architectures := make(container.Set[string])

@@ -15,7 +15,7 @@ import (
 type PackageCleanupRuleForm struct {
 	ID          int64
 	Enabled     bool
-	Type        string `binding:"Required;In(alpine,arch,cargo,chef,composer,conan,conda,container,cran,debian,generic,go,helm,maven,npm,nuget,pub,pypi,rpm,rubygems,swift,vagrant)"`
+	Type        string `binding:"Required;In(alpine,arch,cargo,chef,composer,conan,conda,container,cran,debian,generic,go,helm,maven,npm,nuget,pub,pypi,rpm,alt,rubygems,swift,vagrant)"`
 	KeepCount   int    `binding:"In(0,1,5,10,25,50,100)"`
 	KeepPattern string `binding:"RegexPattern"`
 	RemoveDays  int    `binding:"In(0,7,14,30,60,90,180)"`
921  services/packages/alt/reposirory.go  Normal file
@@ -0,0 +1,921 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package alt
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"regexp"
+	"slices"
+	"strconv"
+	"strings"
+	"time"
+
+	packages_model "code.gitea.io/gitea/models/packages"
+	alt_model "code.gitea.io/gitea/models/packages/alt"
+	user_model "code.gitea.io/gitea/models/user"
+	"code.gitea.io/gitea/modules/json"
+	packages_module "code.gitea.io/gitea/modules/packages"
+	rpm_module "code.gitea.io/gitea/modules/packages/rpm"
+	"code.gitea.io/gitea/modules/setting"
+	packages_service "code.gitea.io/gitea/services/packages"
+
+	"github.com/ulikunitz/xz"
+)
+
+// GetOrCreateRepositoryVersion gets or creates the internal repository package
+// The RPM registry needs multiple metadata files which are stored in this package.
+func GetOrCreateRepositoryVersion(ctx context.Context, ownerID int64) (*packages_model.PackageVersion, error) {
+	return packages_service.GetOrCreateInternalPackageVersion(ctx, ownerID, packages_model.TypeAlt, rpm_module.RepositoryPackage, rpm_module.RepositoryVersion)
+}
+
+// BuildAllRepositoryFiles (re)builds all repository files for every available group
+func BuildAllRepositoryFiles(ctx context.Context, ownerID int64) error {
+	pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+	if err != nil {
+		return err
+	}
+
+	// 1. Delete all existing repository files
+	pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+	if err != nil {
+		return err
+	}
+
+	for _, pf := range pfs {
+		if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+			return err
+		}
+	}
+
+	// 2. (Re)Build repository files for existing packages
+	groups, err := alt_model.GetGroups(ctx, ownerID)
+	if err != nil {
+		return err
+	}
+	for _, group := range groups {
+		if err := BuildSpecificRepositoryFiles(ctx, ownerID, group); err != nil {
+			return fmt.Errorf("failed to build repository files [%s]: %w", group, err)
+		}
+	}
+
+	return nil
+}
+
+type repoChecksum struct {
+	Value string `xml:",chardata"`
+	Type  string `xml:"type,attr"`
+}
+
+type repoLocation struct {
+	Href string `xml:"href,attr"`
+}
+
+type repoData struct {
+	Type         string       `xml:"type,attr"`
+	Checksum     repoChecksum `xml:"checksum"`
+	MD5Checksum  repoChecksum `xml:"md5checksum"`
+	Blake2bHash  repoChecksum `xml:"blake2bHash"`
+	OpenChecksum repoChecksum `xml:"open-checksum"`
+	Location     repoLocation `xml:"location"`
+	Timestamp    int64        `xml:"timestamp"`
+	Size         int64        `xml:"size"`
+	OpenSize     int64        `xml:"open-size"`
+}
+
+type packageData struct {
+	Package         *packages_model.Package
+	Version         *packages_model.PackageVersion
+	Blob            *packages_model.PackageBlob
+	VersionMetadata *rpm_module.VersionMetadata
+	FileMetadata    *rpm_module.FileMetadata
+}
+
+type packageCache = map[*packages_model.PackageFile]*packageData
+
+// BuildSpecificRepositoryFiles builds metadata files for the repository
+func BuildSpecificRepositoryFiles(ctx context.Context, ownerID int64, group string) error {
+	pv, err := GetOrCreateRepositoryVersion(ctx, ownerID)
+	if err != nil {
+		return err
+	}
+
+	pfs, _, err := packages_model.SearchFiles(ctx, &packages_model.PackageFileSearchOptions{
+		OwnerID:      ownerID,
+		PackageType:  packages_model.TypeAlt,
+		Query:        "%.rpm",
+		CompositeKey: group,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Delete the repository files if there are no packages
+	if len(pfs) == 0 {
+		pfs, err := packages_model.GetFilesByVersionID(ctx, pv.ID)
+		if err != nil {
+			return err
+		}
+		for _, pf := range pfs {
+			if err := packages_service.DeletePackageFile(ctx, pf); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}
+
+	// Cache data needed for all repository files
+	cache := make(packageCache)
+	for _, pf := range pfs {
+		pv, err := packages_model.GetVersionByID(ctx, pf.VersionID)
+		if err != nil {
+			return err
+		}
+		p, err := packages_model.GetPackageByID(ctx, pv.PackageID)
+		if err != nil {
+			return err
+		}
+		pb, err := packages_model.GetBlobByID(ctx, pf.BlobID)
+		if err != nil {
+			return err
+		}
+		pps, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeFile, pf.ID, rpm_module.PropertyMetadata)
+		if err != nil {
+			return err
+		}
+
+		pd := &packageData{
+			Package: p,
+			Version: pv,
+			Blob:    pb,
+		}
+
+		if err := json.Unmarshal([]byte(pv.MetadataJSON), &pd.VersionMetadata); err != nil {
+			return err
+		}
+		if len(pps) > 0 {
+			if err := json.Unmarshal([]byte(pps[0].Value), &pd.FileMetadata); err != nil {
+				return err
+			}
+		}
+
+		cache[pf] = pd
+	}
+
+	pkglist, err := buildPackageLists(ctx, pv, pfs, cache, group)
+	if err != nil {
+		return err
+	}
+
+	err = buildRelease(ctx, pv, pfs, cache, group, pkglist)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
type RPMHeader struct {
|
||||
Magic [4]byte
|
||||
Reserved [4]byte
|
||||
NIndex uint32
|
||||
HSize uint32
|
||||
}
|
||||
|
||||
type RPMHdrIndex struct {
|
||||
Tag uint32
|
||||
Type uint32
|
||||
Offset uint32
|
||||
Count uint32
|
||||
}
|
||||
|
||||
// https://refspecs.linuxbase.org/LSB_4.0.0/LSB-Core-generic/LSB-Core-generic/pkgformat.html
|
||||
func buildPackageLists(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string) (map[string][]any, error) {
|
||||
architectures := []string{}
|
||||
|
||||
for _, pf := range pfs {
|
||||
pd := c[pf]
|
||||
|
||||
if !slices.Contains(architectures, pd.FileMetadata.Architecture) {
|
||||
architectures = append(architectures, pd.FileMetadata.Architecture)
|
||||
}
|
||||
}
|
||||
|
||||
repoDataListByArch := make(map[string][]any)
|
||||
repoDataList := []any{}
|
||||
orderedHeaders := []*RPMHeader{}
|
||||
|
||||
for i := range architectures {
|
||||
headersWithIndexes := make(map[*RPMHeader]map[*RPMHdrIndex][]any)
|
||||
headersWithPtrs := make(map[*RPMHeader][]*RPMHdrIndex)
|
||||
indexPtrs := []*RPMHdrIndex{}
|
||||
indexes := make(map[*RPMHdrIndex][]any)
|
||||
|
||||
for _, pf := range pfs {
|
||||
pd := c[pf]
|
||||
|
||||
if pd.FileMetadata.Architecture == architectures[i] {
|
||||
var requireNames []any
|
||||
var requireVersions []any
|
||||
var requireFlags []any
|
||||
requireNamesSize := 0
|
||||
requireVersionsSize := 0
|
||||
requireFlagsSize := 0
|
||||
|
||||
for _, entry := range pd.FileMetadata.Requires {
|
||||
if entry != nil {
|
||||
requireNames = append(requireNames, entry.Name)
|
||||
requireVersions = append(requireVersions, entry.Version)
|
||||
requireFlags = append(requireFlags, entry.AltFlags)
|
||||
requireNamesSize += len(entry.Name) + 1
|
||||
requireVersionsSize += len(entry.Version) + 1
|
||||
requireFlagsSize += 4
|
||||
}
|
||||
}
|
||||
|
||||
var conflictNames []any
|
||||
var conflictVersions []any
|
||||
var conflictFlags []any
|
||||
conflictNamesSize := 0
|
||||
conflictVersionsSize := 0
|
||||
conflictFlagsSize := 0
|
||||
|
||||
for _, entry := range pd.FileMetadata.Conflicts {
|
||||
if entry != nil {
|
||||
conflictNames = append(conflictNames, entry.Name)
|
||||
conflictVersions = append(conflictVersions, entry.Version)
|
||||
conflictFlags = append(conflictFlags, entry.AltFlags)
|
||||
conflictNamesSize += len(entry.Name) + 1
|
||||
conflictVersionsSize += len(entry.Version) + 1
|
||||
conflictFlagsSize += 4
|
||||
}
|
||||
}
|
||||
|
||||
var baseNames []any
|
||||
var dirNames []any
|
||||
baseNamesSize := 0
|
||||
dirNamesSize := 0
|
||||
|
||||
for _, entry := range pd.FileMetadata.Files {
|
||||
if entry != nil {
|
||||
re := regexp.MustCompile(`(.*?/)([^/]*)$`)
|
||||
matches := re.FindStringSubmatch(entry.Path)
|
||||
if len(matches) == 3 {
|
||||
baseNames = append(baseNames, matches[2])
|
||||
dirNames = append(dirNames, matches[1])
|
||||
baseNamesSize += len(matches[2]) + 1
|
||||
dirNamesSize += len(matches[1]) + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var provideNames []any
|
||||
var provideVersions []any
|
||||
var provideFlags []any
|
||||
provideNamesSize := 0
|
||||
provideVersionsSize := 0
|
||||
provideFlagsSize := 0
|
||||
|
||||
for _, entry := range pd.FileMetadata.Provides {
|
||||
if entry != nil {
|
||||
provideNames = append(provideNames, entry.Name)
|
||||
provideVersions = append(provideVersions, entry.Version)
|
||||
provideFlags = append(provideFlags, entry.AltFlags)
|
||||
provideNamesSize += len(entry.Name) + 1
|
||||
provideVersionsSize += len(entry.Version) + 1
|
||||
provideFlagsSize += 4
|
||||
}
|
||||
}
|
||||
|
||||
var obsoleteNames []any
|
||||
var obsoleteVersions []any
|
||||
var obsoleteFlags []any
|
||||
obsoleteNamesSize := 0
|
||||
obsoleteVersionsSize := 0
|
||||
obsoleteFlagsSize := 0
|
||||
|
||||
for _, entry := range pd.FileMetadata.Obsoletes {
|
||||
if entry != nil {
|
||||
obsoleteNames = append(obsoleteNames, entry.Name)
|
||||
obsoleteVersions = append(obsoleteVersions, entry.Version)
|
||||
obsoleteFlags = append(obsoleteFlags, entry.AltFlags)
|
||||
obsoleteNamesSize += len(entry.Name) + 1
|
||||
obsoleteVersionsSize += len(entry.Version) + 1
|
||||
obsoleteFlagsSize += 4
|
||||
}
|
||||
}
|
||||
|
||||
var changeLogTimes []any
|
||||
var changeLogNames []any
|
||||
var changeLogTexts []any
|
||||
changeLogTimesSize := 0
|
||||
changeLogNamesSize := 0
|
||||
changeLogTextsSize := 0
|
||||
|
||||
for _, entry := range pd.FileMetadata.Changelogs {
|
||||
if entry != nil {
|
||||
changeLogNames = append(changeLogNames, entry.Author)
|
||||
changeLogTexts = append(changeLogTexts, entry.Text)
|
||||
changeLogTimes = append(changeLogTimes, uint32(int64(entry.Date)))
|
||||
changeLogNamesSize += len(entry.Author) + 1
|
||||
changeLogTextsSize += len(entry.Text) + 1
|
||||
changeLogTimesSize += 4
|
||||
}
|
||||
}
|
||||
|
||||
/*Header*/
|
||||
hdr := &RPMHeader{
|
||||
Magic: [4]byte{0x8E, 0xAD, 0xE8, 0x01},
|
||||
Reserved: [4]byte{0, 0, 0, 0},
|
||||
NIndex: binary.BigEndian.Uint32([]byte{0, 0, 0, 0}),
|
||||
HSize: binary.BigEndian.Uint32([]byte{0, 0, 0, 0}),
|
||||
}
|
||||
orderedHeaders = append(orderedHeaders, hdr)
|
||||
|
||||
/*Tags: */
|
||||
|
||||
nameInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 232}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
|
||||
Offset: 0,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &nameInd)
|
||||
indexes[&nameInd] = append(indexes[&nameInd], pd.Package.Name)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.Package.Name) + 1)
|
||||
|
||||
// Индекс для версии пакета
|
||||
versionInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 233}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &versionInd)
|
||||
indexes[&versionInd] = append(indexes[&versionInd], pd.FileMetadata.Version)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.FileMetadata.Version) + 1)
|
||||
|
||||
summaryInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 236}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 9}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &summaryInd)
|
||||
indexes[&summaryInd] = append(indexes[&summaryInd], pd.VersionMetadata.Summary)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.VersionMetadata.Summary) + 1)
|
||||
|
||||
descriptionInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 237}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 9}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &descriptionInd)
|
||||
indexes[&descriptionInd] = append(indexes[&descriptionInd], pd.VersionMetadata.Description)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.VersionMetadata.Description) + 1)
|
||||
|
||||
releaseInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 234}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &releaseInd)
|
||||
indexes[&releaseInd] = append(indexes[&releaseInd], pd.FileMetadata.Release)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.FileMetadata.Release) + 1)
|
||||
|
||||
alignPadding(hdr, indexes, &releaseInd)
|
||||
|
||||
sizeInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 241}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 4}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &sizeInd)
|
||||
indexes[&sizeInd] = append(indexes[&sizeInd], int32(pd.FileMetadata.InstalledSize))
|
||||
hdr.NIndex++
|
||||
hdr.HSize += 4
|
||||
|
||||
buildTimeInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 238}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 4}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &buildTimeInd)
|
||||
indexes[&buildTimeInd] = append(indexes[&buildTimeInd], int32(pd.FileMetadata.BuildTime))
|
||||
hdr.NIndex++
|
||||
hdr.HSize += 4
|
||||
|
||||
licenseInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 246}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &licenseInd)
|
||||
indexes[&licenseInd] = append(indexes[&licenseInd], pd.VersionMetadata.License)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.VersionMetadata.License) + 1)
|
||||
|
||||
packagerInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 247}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &packagerInd)
|
||||
indexes[&packagerInd] = append(indexes[&packagerInd], pd.FileMetadata.Packager)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.FileMetadata.Packager) + 1)
|
||||
|
||||
groupInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 248}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &groupInd)
|
||||
indexes[&groupInd] = append(indexes[&groupInd], pd.FileMetadata.Group)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.FileMetadata.Group) + 1)
|
||||
|
||||
urlInd := RPMHdrIndex{
|
||||
Tag: binary.BigEndian.Uint32([]byte{0, 0, 3, 252}),
|
||||
Type: binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
|
||||
Offset: hdr.HSize,
|
||||
Count: 1,
|
||||
}
|
||||
indexPtrs = append(indexPtrs, &urlInd)
|
||||
indexes[&urlInd] = append(indexes[&urlInd], pd.VersionMetadata.ProjectURL)
|
||||
hdr.NIndex++
|
||||
hdr.HSize += uint32(len(pd.VersionMetadata.ProjectURL) + 1)
|
||||
|
||||
if len(changeLogNames) != 0 && len(changeLogTexts) != 0 && len(changeLogTimes) != 0 {
|
||||
alignPadding(hdr, indexes, &urlInd)
|
||||
|
||||
addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x38}, []byte{0, 0, 0, 4}, changeLogTimes, changeLogTimesSize)
|
||||
addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x39}, []byte{0, 0, 0, 8}, changeLogNames, changeLogNamesSize)
|
||||
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x3A}, []byte{0, 0, 0, 8}, changeLogTexts, changeLogTextsSize)
                }

                archInd := RPMHdrIndex{
                    Tag:    binary.BigEndian.Uint32([]byte{0, 0, 3, 254}),
                    Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
                    Offset: hdr.HSize,
                    Count:  1,
                }
                indexPtrs = append(indexPtrs, &archInd)
                indexes[&archInd] = append(indexes[&archInd], pd.FileMetadata.Architecture)
                hdr.NIndex++
                hdr.HSize += uint32(len(pd.FileMetadata.Architecture) + 1)

                if len(provideNames) != 0 && len(provideVersions) != 0 && len(provideFlags) != 0 {
                    alignPadding(hdr, indexes, &archInd)

                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x58}, []byte{0, 0, 0, 4}, provideFlags, provideFlagsSize)
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x17}, []byte{0, 0, 0, 8}, provideNames, provideNamesSize)
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x59}, []byte{0, 0, 0, 8}, provideVersions, provideVersionsSize)
                }

                sourceRpmInd := RPMHdrIndex{
                    Tag:    binary.BigEndian.Uint32([]byte{0x00, 0x00, 0x04, 0x14}),
                    Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
                    Offset: hdr.HSize,
                    Count:  1,
                }
                indexPtrs = append(indexPtrs, &sourceRpmInd)
                indexes[&sourceRpmInd] = append(indexes[&sourceRpmInd], pd.FileMetadata.SourceRpm)
                hdr.NIndex++
                hdr.HSize += binary.BigEndian.Uint32([]byte{0, 0, 0, uint8(len(pd.FileMetadata.SourceRpm) + 1)})

                if len(requireNames) != 0 && len(requireVersions) != 0 && len(requireFlags) != 0 {
                    alignPadding(hdr, indexes, &sourceRpmInd)

                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x18}, []byte{0, 0, 0, 4}, requireFlags, requireFlagsSize)
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0, 0, 4, 25}, []byte{0, 0, 0, 8}, requireNames, requireNamesSize)
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x1A}, []byte{0, 0, 0, 8}, requireVersions, requireVersionsSize)
                }

                if len(baseNames) != 0 {
                    baseNamesInd := RPMHdrIndex{
                        Tag:    binary.BigEndian.Uint32([]byte{0x00, 0x00, 0x04, 0x5D}),
                        Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 8}),
                        Offset: hdr.HSize,
                        Count:  uint32(len(baseNames)),
                    }
                    indexPtrs = append(indexPtrs, &baseNamesInd)
                    indexes[&baseNamesInd] = baseNames
                    hdr.NIndex++
                    hdr.HSize += uint32(baseNamesSize)
                }

                if len(dirNames) != 0 {
                    dirnamesInd := RPMHdrIndex{
                        Tag:    binary.BigEndian.Uint32([]byte{0x00, 0x00, 0x04, 0x5E}),
                        Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 8}),
                        Offset: hdr.HSize,
                        Count:  uint32(len(dirNames)),
                    }
                    indexPtrs = append(indexPtrs, &dirnamesInd)
                    indexes[&dirnamesInd] = dirNames
                    hdr.NIndex++
                    hdr.HSize += uint32(dirNamesSize)
                }

                filenameInd := RPMHdrIndex{
                    Tag:    binary.BigEndian.Uint32([]byte{0x00, 0x0F, 0x42, 0x40}),
                    Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
                    Offset: hdr.HSize,
                    Count:  1,
                }
                indexPtrs = append(indexPtrs, &filenameInd)
                indexes[&filenameInd] = append(indexes[&filenameInd], pf.Name)
                hdr.NIndex++
                hdr.HSize += uint32(len(pf.Name) + 1)

                alignPadding(hdr, indexes, &filenameInd)

                filesizeInd := RPMHdrIndex{
                    Tag:    binary.BigEndian.Uint32([]byte{0x00, 0x0F, 0x42, 0x41}),
                    Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 4}),
                    Offset: hdr.HSize,
                    Count:  1,
                }
                indexPtrs = append(indexPtrs, &filesizeInd)
                indexes[&filesizeInd] = append(indexes[&filesizeInd], int32(pd.Blob.Size))
                hdr.NIndex++
                hdr.HSize += 4

                md5Ind := RPMHdrIndex{
                    Tag:    binary.BigEndian.Uint32([]byte{0x00, 0x0F, 0x42, 0x45}),
                    Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
                    Offset: hdr.HSize,
                    Count:  1,
                }
                indexPtrs = append(indexPtrs, &md5Ind)
                indexes[&md5Ind] = append(indexes[&md5Ind], pd.Blob.HashMD5)
                hdr.NIndex++
                hdr.HSize += uint32(len(pd.Blob.HashMD5) + 1)

                blake2bInd := RPMHdrIndex{
                    Tag:    binary.BigEndian.Uint32([]byte{0x00, 0x0F, 0x42, 0x49}),
                    Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
                    Offset: hdr.HSize,
                    Count:  1,
                }
                indexPtrs = append(indexPtrs, &blake2bInd)
                indexes[&blake2bInd] = append(indexes[&blake2bInd], pd.Blob.HashBlake2b)
                hdr.NIndex++
                hdr.HSize += uint32(len(pd.Blob.HashBlake2b) + 1)

                if len(conflictNames) != 0 && len(conflictVersions) != 0 && len(conflictFlags) != 0 {
                    alignPadding(hdr, indexes, &blake2bInd)

                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x1D}, []byte{0, 0, 0, 4}, conflictFlags, conflictFlagsSize)
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x1E}, []byte{0, 0, 0, 8}, conflictNames, conflictNamesSize)
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x1F}, []byte{0, 0, 0, 8}, conflictVersions, conflictVersionsSize)
                }

                directoryInd := RPMHdrIndex{
                    Tag:    binary.BigEndian.Uint32([]byte{0x00, 0x0F, 0x42, 0x4A}),
                    Type:   binary.BigEndian.Uint32([]byte{0, 0, 0, 6}),
                    Offset: hdr.HSize,
                    Count:  1,
                }
                indexPtrs = append(indexPtrs, &directoryInd)
                indexes[&directoryInd] = append(indexes[&directoryInd], "RPMS.classic")
                hdr.NIndex++
                hdr.HSize += binary.BigEndian.Uint32([]byte{0, 0, 0, uint8(len("RPMS.classic") + 1)})

                if len(obsoleteNames) != 0 && len(obsoleteVersions) != 0 && len(obsoleteFlags) != 0 {
                    alignPadding(hdr, indexes, &directoryInd)

                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x5A}, []byte{0, 0, 0, 4}, obsoleteFlags, obsoleteFlagsSize)
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x42}, []byte{0, 0, 0, 8}, obsoleteNames, obsoleteNamesSize)
                    addRPMHdrIndex(hdr, &indexPtrs, indexes, []byte{0x00, 0x00, 0x04, 0x5B}, []byte{0, 0, 0, 8}, obsoleteVersions, obsoleteVersionsSize)
                }

                headersWithIndexes[hdr] = indexes
                headersWithPtrs[hdr] = indexPtrs

                indexPtrs = []*RPMHdrIndex{}
                indexes = make(map[*RPMHdrIndex][]any)
            }
        }

        files := []string{"pkglist.classic", "pkglist.classic.xz"}
        for file := range files {
            fileInfo, err := addPkglistAsFileToRepo(ctx, pv, files[file], headersWithIndexes, headersWithPtrs, orderedHeaders, group, architectures[i])
            if err != nil {
                return nil, err
            }
            repoDataList = append(repoDataList, fileInfo)
            repoDataListByArch[architectures[i]] = repoDataList
        }
        repoDataList = []any{}
        orderedHeaders = []*RPMHeader{}
    }
    return repoDataListByArch, nil
}

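For orientation: each pkglist entry reuses the RPM header wire format. After an 8-byte magic come a 4-byte index count and a 4-byte data size, then one 16-byte index entry per tag (tag, type, offset, count, all big-endian uint32s) pointing into the data area, where string values are NUL-terminated. A minimal, self-contained sketch of one such entry; the tag number and value are illustrative:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func main() {
    // RPMTAG_ARCH = 1022 (0x03FE), type 6 = STRING, one value at offset 0.
    entry := struct{ Tag, Type, Offset, Count uint32 }{1022, 6, 0, 1}

    var idx bytes.Buffer
    _ = binary.Write(&idx, binary.BigEndian, entry) // 16 bytes, big-endian

    var data bytes.Buffer
    data.WriteString("x86_64\x00") // string values are NUL-terminated

    fmt.Printf("index entry: % x\n", idx.Bytes())
    fmt.Printf("data area: %q\n", data.Bytes())
}
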
func alignPadding(hdr *RPMHeader, indexes map[*RPMHdrIndex][]any, lastIndex *RPMHdrIndex) {
    /* Align to a 4-byte boundary before adding a 4-byte element. */
    padding := (4 - (hdr.HSize % 4)) % 4
    if padding == 4 {
        padding = 0
    }
    hdr.HSize += binary.BigEndian.Uint32([]byte{0, 0, 0, uint8(padding)})

    for i := uint32(0); i < padding; i++ {
        for _, elem := range indexes[lastIndex] {
            if str, ok := elem.(string); ok {
                indexes[lastIndex][len(indexes[lastIndex])-1] = str + "\x00"
            }
        }
    }
}

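alignPadding exists because 4-byte integer (INT32) entries must start on a 4-byte boundary in the data area; the helper rounds hdr.HSize up and pads the preceding string value with extra NUL bytes. The arithmetic in isolation:

package main

import "fmt"

// pad returns the filler bytes needed before the next 4-byte-aligned entry.
func pad(hsize uint32) uint32 {
    return (4 - (hsize % 4)) % 4
}

func main() {
    for _, n := range []uint32{4, 5, 6, 7, 8} {
        fmt.Printf("size %d -> %d padding byte(s)\n", n, pad(n)) // 0, 3, 2, 1, 0
    }
}
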
func addRPMHdrIndex(hdr *RPMHeader, indexPtrs *[]*RPMHdrIndex, indexes map[*RPMHdrIndex][]any, tag, typeByte []byte, data []any, dataSize int) {
    index := RPMHdrIndex{
        Tag:    binary.BigEndian.Uint32(tag),
        Type:   binary.BigEndian.Uint32(typeByte),
        Offset: hdr.HSize,
        Count:  uint32(len(data)),
    }
    *indexPtrs = append(*indexPtrs, &index)
    indexes[&index] = data
    hdr.NIndex++
    hdr.HSize += uint32(dataSize)
}

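The tag and typeByte arguments are big-endian encodings of RPM tag and type numbers; for instance, []byte{0x00, 0x00, 0x04, 0x17} is tag 1047 (RPMTAG_PROVIDENAME) and []byte{0, 0, 0, 8} is type 8 (STRING_ARRAY). A self-contained sketch of a call, using simplified stand-ins for the real header types (which carry more fields) and illustrative data:

package main

import (
    "encoding/binary"
    "fmt"
)

// Simplified stand-ins for the RPMHeader/RPMHdrIndex types used above.
type RPMHeader struct {
    NIndex uint32
    HSize  uint32
}

type RPMHdrIndex struct {
    Tag, Type, Offset, Count uint32
}

// Same shape as the addRPMHdrIndex helper above.
func addRPMHdrIndex(hdr *RPMHeader, indexPtrs *[]*RPMHdrIndex, indexes map[*RPMHdrIndex][]any, tag, typeByte []byte, data []any, dataSize int) {
    idx := &RPMHdrIndex{
        Tag:    binary.BigEndian.Uint32(tag),
        Type:   binary.BigEndian.Uint32(typeByte),
        Offset: hdr.HSize,
        Count:  uint32(len(data)),
    }
    *indexPtrs = append(*indexPtrs, idx)
    indexes[idx] = data
    hdr.NIndex++
    hdr.HSize += uint32(dataSize)
}

func main() {
    hdr := &RPMHeader{}
    var ptrs []*RPMHdrIndex
    indexes := map[*RPMHdrIndex][]any{}

    // Tag 0x0417 = 1047 (RPMTAG_PROVIDENAME), type 8 = STRING_ARRAY.
    names := []any{"hello", "hello(x86-64)"}
    size := len("hello\x00") + len("hello(x86-64)\x00")
    addRPMHdrIndex(hdr, &ptrs, indexes, []byte{0, 0, 4, 0x17}, []byte{0, 0, 0, 8}, names, size)

    fmt.Printf("entries=%d dataSize=%d tag=%d\n", hdr.NIndex, hdr.HSize, ptrs[0].Tag)
}
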
// https://www.altlinux.org/APT_в_ALT_Linux/CreateRepository
func buildRelease(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string, pkglist map[string][]any) error {
    var buf bytes.Buffer

    architectures := []string{}

    for _, pf := range pfs {
        pd := c[pf]
        if !slices.Contains(architectures, pd.FileMetadata.Architecture) {
            architectures = append(architectures, pd.FileMetadata.Architecture)
        }
    }

    for i := range architectures {
        archive := "Alt Linux Team"
        component := "classic"
        version := strconv.FormatInt(time.Now().Unix(), 10)
        architectures := architectures[i]
        origin := "Alt Linux Team"
        label := setting.AppName
        notautomatic := "false"
        data := fmt.Sprintf("Archive: %s\nComponent: %s\nVersion: %s\nOrigin: %s\nLabel: %s\nArchitecture: %s\nNotAutomatic: %s",
            archive, component, version, origin, label, architectures, notautomatic)
        buf.WriteString(data + "\n")
        fileInfo, err := addReleaseAsFileToRepo(ctx, pv, "release.classic", buf.String(), group, architectures)
        if err != nil {
            return err
        }
        buf.Reset()

        origin = setting.AppName
        suite := "Sisyphus"
        codename := strconv.FormatInt(time.Now().Unix(), 10)
        date := time.Now().UTC().Format(time.RFC1123)

        var md5Sum string
        var blake2b string

        for _, pkglistByArch := range pkglist[architectures] {
            md5Sum += fmt.Sprintf(" %s %s %s\n", pkglistByArch.([]string)[2], pkglistByArch.([]string)[4], "base/"+pkglistByArch.([]string)[0])
            blake2b += fmt.Sprintf(" %s %s %s\n", pkglistByArch.([]string)[3], pkglistByArch.([]string)[4], "base/"+pkglistByArch.([]string)[0])
        }
        md5Sum += fmt.Sprintf(" %s %s %s\n", fileInfo[2], fileInfo[4], "base/"+fileInfo[0])
        blake2b += fmt.Sprintf(" %s %s %s\n", fileInfo[3], fileInfo[4], "base/"+fileInfo[0])

        data = fmt.Sprintf("Origin: %s\nLabel: %s\nSuite: %s\nCodename: %s\nDate: %s\nArchitectures: %s\nMD5Sum:\n%sBLAKE2b:\n%s\n",
            origin, label, suite, codename, date, architectures, md5Sum, blake2b)
        buf.WriteString(data + "\n")
        _, err = addReleaseAsFileToRepo(ctx, pv, "release", buf.String(), group, architectures)
        if err != nil {
            return err
        }
        buf.Reset()
    }
    return nil
}

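Under the constants above, the two files buildRelease writes per architecture look roughly like this; timestamps, hashes, and sizes are illustrative placeholders:

// release.classic: per-component metadata.
const releaseClassic = `Archive: Alt Linux Team
Component: classic
Version: 1700000000
Origin: Alt Linux Team
Label: Forgejo
Architecture: x86_64
NotAutomatic: false`

// release: the index apt verifies, listing each base/ file once per
// checksum family.
const release = `Origin: Forgejo
Label: Forgejo
Suite: Sisyphus
Codename: 1700000000
Date: Tue, 14 Nov 2023 00:00:00 UTC
Architectures: x86_64
MD5Sum:
 <md5> <size> base/pkglist.classic
BLAKE2b:
 <blake2b> <size> base/pkglist.classic`
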
func addReleaseAsFileToRepo(ctx context.Context, pv *packages_model.PackageVersion, filename, obj, group, arch string) ([]string, error) {
    content, _ := packages_module.NewHashedBuffer()
    defer content.Close()

    h := sha256.New()

    w := io.MultiWriter(content, h)
    if _, err := w.Write([]byte(obj)); err != nil {
        return nil, err
    }

    _, err := packages_service.AddFileToPackageVersionInternal(
        ctx,
        pv,
        &packages_service.PackageFileCreationInfo{
            PackageFileInfo: packages_service.PackageFileInfo{
                Filename:     filename,
                CompositeKey: arch + "__" + group,
            },
            Creator:           user_model.NewGhostUser(),
            Data:              content,
            IsLead:            false,
            OverwriteExisting: true,
        },
    )
    if err != nil {
        return nil, err
    }

    hashMD5, _, hashSHA256, _, hashBlake2b := content.Sums()

    if group == "" {
        group = "alt"
    }

    repoData := &repoData{
        Type: filename,
        Checksum: repoChecksum{
            Type:  "sha256",
            Value: hex.EncodeToString(hashSHA256),
        },
        MD5Checksum: repoChecksum{
            Type:  "md5",
            Value: hex.EncodeToString(hashMD5),
        },
        OpenChecksum: repoChecksum{
            Type:  "sha256",
            Value: hex.EncodeToString(h.Sum(nil)),
        },
        Blake2bHash: repoChecksum{
            Type:  "blake2b",
            Value: hex.EncodeToString(hashBlake2b),
        },
        Location: repoLocation{
            Href: group + ".repo/" + arch + "/base/" + filename,
        },
        Size: content.Size(),
        /* Unused values:
        Timestamp: time.Now().Unix(),
        OpenSize:  content.Size(), */
    }

    data := []string{
        repoData.Type, repoData.Checksum.Value,
        repoData.MD5Checksum.Value, repoData.Blake2bHash.Value, strconv.Itoa(int(repoData.Size)),
    }

    return data, nil
}

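Both writers above funnel bytes through packages_module.HashedBuffer, which computes every supported digest as data is written; with this change Sums() returns five values, in the order used throughout this patch. A sketch of the pattern, assuming the same imports as this file:

func hashedBufferExample() error {
    content, err := packages_module.NewHashedBuffer()
    if err != nil {
        return err
    }
    defer content.Close()

    if _, err := content.Write([]byte("release file body")); err != nil {
        return err
    }

    // MD5, SHA1, SHA256, SHA512, BLAKE2b, in that order.
    hashMD5, _, hashSHA256, _, hashBlake2b := content.Sums()
    fmt.Printf("md5=%x sha256=%x blake2b=%x size=%d\n", hashMD5, hashSHA256, hashBlake2b, content.Size())
    return nil
}
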
func addPkglistAsFileToRepo(ctx context.Context, pv *packages_model.PackageVersion, filename string, headersWithIndexes map[*RPMHeader]map[*RPMHdrIndex][]any, headersWithPtrs map[*RPMHeader][]*RPMHdrIndex, orderedHeaders []*RPMHeader, group, arch string) ([]string, error) {
    content, _ := packages_module.NewHashedBuffer()
    defer content.Close()

    h := sha256.New()
    w := io.MultiWriter(content, h)
    buf := &bytes.Buffer{}

    for _, hdr := range orderedHeaders {
        if err := binary.Write(buf, binary.BigEndian, hdr); err != nil {
            return nil, err
        }

        for _, indexPtr := range headersWithPtrs[hdr] {
            index := *indexPtr

            if err := binary.Write(buf, binary.BigEndian, index); err != nil {
                return nil, err
            }
        }

        for _, indexPtr := range headersWithPtrs[hdr] {
            for _, indexValue := range headersWithIndexes[hdr][indexPtr] {
                switch v := indexValue.(type) {
                case string:
                    if _, err := buf.WriteString(v + "\x00"); err != nil {
                        return nil, err
                    }
                case int, int32, int64, uint32:
                    if err := binary.Write(buf, binary.BigEndian, v); err != nil {
                        return nil, err
                    }
                }
            }
        }
    }

    parts := strings.Split(filename, ".")

    if len(parts) == 3 && parts[len(parts)-1] == "xz" {
        xzContent, err := compressXZ(buf.Bytes())
        if err != nil {
            return nil, err
        }
        if _, err := w.Write(xzContent); err != nil {
            return nil, err
        }
    } else {
        if _, err := w.Write(buf.Bytes()); err != nil {
            return nil, err
        }
    }

    _, err := packages_service.AddFileToPackageVersionInternal(
        ctx,
        pv,
        &packages_service.PackageFileCreationInfo{
            PackageFileInfo: packages_service.PackageFileInfo{
                Filename:     filename,
                CompositeKey: arch + "__" + group,
            },
            Creator:           user_model.NewGhostUser(),
            Data:              content,
            IsLead:            false,
            OverwriteExisting: true,
        },
    )
    if err != nil {
        return nil, err
    }

    hashMD5, _, hashSHA256, _, hashBlake2b := content.Sums()

    if group == "" {
        group = "alt"
    }

    repoData := &repoData{
        Type: filename,
        Checksum: repoChecksum{
            Type:  "sha256",
            Value: hex.EncodeToString(hashSHA256),
        },
        MD5Checksum: repoChecksum{
            Type:  "md5",
            Value: hex.EncodeToString(hashMD5),
        },
        OpenChecksum: repoChecksum{
            Type:  "sha256",
            Value: hex.EncodeToString(h.Sum(nil)),
        },
        Blake2bHash: repoChecksum{
            Type:  "blake2b",
            Value: hex.EncodeToString(hashBlake2b),
        },
        Location: repoLocation{
            Href: group + ".repo/" + arch + "/base/" + filename,
        },
        Size: content.Size(),
        /* Unused values:
        Timestamp: time.Now().Unix(),
        OpenSize:  content.Size(), */
    }

    data := []string{
        repoData.Type, repoData.Checksum.Value,
        repoData.MD5Checksum.Value, repoData.Blake2bHash.Value, strconv.Itoa(int(repoData.Size)),
    }

    return data, nil
}

func compressXZ(data []byte) ([]byte, error) {
    var xzContent bytes.Buffer
    xzWriter, err := xz.NewWriter(&xzContent)
    if err != nil {
        return nil, err
    }
    defer xzWriter.Close()

    if _, err := xzWriter.Write(data); err != nil {
        return nil, err
    }
    if err := xzWriter.Close(); err != nil {
        return nil, err
    }

    return xzContent.Bytes(), nil
}

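compressXZ closes the writer twice on purpose: the deferred Close is a safety net, while the explicit Close flushes the final block before the buffer is read. A roundtrip sketch, presumably with the same github.com/ulikunitz/xz package the integration tests import:

package main

import (
    "bytes"
    "fmt"
    "io"

    "github.com/ulikunitz/xz"
)

func main() {
    plain := []byte("pkglist.classic contents")

    var compressed bytes.Buffer
    w, err := xz.NewWriter(&compressed)
    if err != nil {
        panic(err)
    }
    if _, err := w.Write(plain); err != nil {
        panic(err)
    }
    if err := w.Close(); err != nil { // flush the final xz block
        panic(err)
    }

    r, err := xz.NewReader(&compressed)
    if err != nil {
        panic(err)
    }
    out, err := io.ReadAll(r)
    if err != nil {
        panic(err)
    }
    fmt.Println(bytes.Equal(out, plain)) // true
}
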
@@ -16,6 +16,7 @@ import (
    packages_module "code.gitea.io/gitea/modules/packages"
    packages_service "code.gitea.io/gitea/services/packages"
    alpine_service "code.gitea.io/gitea/services/packages/alpine"
    alt_service "code.gitea.io/gitea/services/packages/alt"
    arch_service "code.gitea.io/gitea/services/packages/arch"
    cargo_service "code.gitea.io/gitea/services/packages/cargo"
    container_service "code.gitea.io/gitea/services/packages/container"

@@ -137,6 +138,10 @@ func ExecuteCleanupRules(outerCtx context.Context) error {
            if err := arch_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
                return fmt.Errorf("CleanupRule [%d]: arch.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
            }
        } else if pcr.Type == packages_model.TypeAlt {
            if err := alt_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
                return fmt.Errorf("CleanupRule [%d]: alt.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
            }
        }
    }
    return nil

@@ -244,14 +244,15 @@ func addFileToPackageWrapper(ctx context.Context, fn func(ctx context.Context) (

// NewPackageBlob creates a package blob instance
func NewPackageBlob(hsr packages_module.HashedSizeReader) *packages_model.PackageBlob {
    hashMD5, hashSHA1, hashSHA256, hashSHA512 := hsr.Sums()
    hashMD5, hashSHA1, hashSHA256, hashSHA512, hashBlake2b := hsr.Sums()

    return &packages_model.PackageBlob{
        Size:       hsr.Size(),
        HashMD5:    hex.EncodeToString(hashMD5),
        HashSHA1:   hex.EncodeToString(hashSHA1),
        HashSHA256: hex.EncodeToString(hashSHA256),
        HashSHA512: hex.EncodeToString(hashSHA512),
        Size:        hsr.Size(),
        HashMD5:     hex.EncodeToString(hashMD5),
        HashSHA1:    hex.EncodeToString(hashSHA1),
        HashSHA256:  hex.EncodeToString(hashSHA256),
        HashSHA512:  hex.EncodeToString(hashSHA512),
        HashBlake2b: hex.EncodeToString(hashBlake2b),
    }
}

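The new fifth value is a BLAKE2b digest; judging by the 128-hex-character values asserted in the integration tests below, it is BLAKE2b-512. Computing the same kind of digest directly, assuming the standard golang.org/x/crypto/blake2b package:

package main

import (
    "encoding/hex"
    "fmt"

    "golang.org/x/crypto/blake2b"
)

func main() {
    sum := blake2b.Sum512([]byte("hello")) // [64]byte
    fmt.Println(hex.EncodeToString(sum[:])) // 128 hex characters
}
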
@@ -395,6 +396,8 @@ func CheckSizeQuotaExceeded(ctx context.Context, doer, owner *user_model.User, p
        typeSpecificSize = setting.Packages.LimitSizePyPI
    case packages_model.TypeRpm:
        typeSpecificSize = setting.Packages.LimitSizeRpm
    case packages_model.TypeAlt:
        typeSpecificSize = setting.Packages.LimitSizeAlt
    case packages_model.TypeRubyGems:
        typeSpecificSize = setting.Packages.LimitSizeRubyGems
    case packages_model.TypeSwift:

@@ -622,7 +622,7 @@ func addDataAsFileToRepo(ctx context.Context, pv *packages_model.PackageVersion,
        return nil, err
    }

    _, _, hashSHA256, _ := content.Sums()
    _, _, hashSHA256, _, _ := content.Sums()

    return &repoData{
        Type: filetype,

49
templates/package/content/alt.tmpl
Normal file
@@ -0,0 +1,49 @@
{{if eq .PackageDescriptor.Package.Type "alt"}}
    <h4 class="ui top attached header">{{ctx.Locale.Tr "packages.installation"}}</h4>
    <div class="ui attached segment">
        <div class="ui form">
            <div class="field">
                <label>{{svg "octicon-terminal"}} {{ctx.Locale.Tr "packages.alt.registry"}}</label>
                <div class="markup"><pre class="code-block"><code>{{- if gt (len .Groups) 1 -}}
# {{ctx.Locale.Tr "packages.alt.repository.multiple_groups"}}

{{end -}}
# {{ctx.Locale.Tr "packages.alt.setup"}}
{{- range $group := .Groups}}
{{- if $group}}{{$group = print "/" $group}}{{end}}
apt-repo add rpm <origin-url data-url="{{AppSubUrl}}/api/packages/{{$.PackageDescriptor.Owner.Name}}/alt{{- if $group}}{{$group}}{{- else}}/alt{{- end}}.repo"></origin-url> _arch_ classic

{{- end}}</code></pre></div>
            </div>
            <div class="field">
                <label>{{svg "octicon-terminal"}} {{ctx.Locale.Tr "packages.alt.install"}}</label>
                <div class="markup">
                    <pre class="code-block"><code># {{ctx.Locale.Tr "packages.alt.registry.install"}}
apt-get update
apt-get install {{$.PackageDescriptor.Package.Name}}</code></pre>
                </div>
            </div>
            <div class="field">
                <label>{{ctx.Locale.Tr "packages.registry.documentation" "ALT" "https://docs.gitea.com/usage/packages/alt/"}}</label>
            </div>
        </div>
    </div>

    <h4 class="ui top attached header">{{ctx.Locale.Tr "packages.alt.repository"}}</h4>
    <div class="ui attached segment">
        <table class="ui single line very basic table">
            <tbody>
                <tr>
                    <td class="collapsing"><h5>{{ctx.Locale.Tr "packages.alt.repository.architectures"}}</h5></td>
                    <td>{{StringUtils.Join .Architectures ", "}}</td>
                </tr>
            </tbody>
        </table>
    </div>

    {{if or .PackageDescriptor.Metadata.Summary .PackageDescriptor.Metadata.Description}}
        <h4 class="ui top attached header">{{ctx.Locale.Tr "packages.about"}}</h4>
        {{if .PackageDescriptor.Metadata.Summary}}<div class="ui attached segment">{{.PackageDescriptor.Metadata.Summary}}</div>{{end}}
        {{if .PackageDescriptor.Metadata.Description}}<div class="ui attached segment">{{.PackageDescriptor.Metadata.Description}}</div>{{end}}
    {{end}}
{{end}}

4
templates/package/metadata/alt.tmpl
Normal file
@@ -0,0 +1,4 @@
{{if eq .PackageDescriptor.Package.Type "alt"}}
    {{if .PackageDescriptor.Metadata.ProjectURL}}<div class="item">{{svg "octicon-link-external" 16 "tw-mr-2"}} <a href="{{.PackageDescriptor.Metadata.ProjectURL}}" target="_blank" rel="noopener noreferrer me">{{ctx.Locale.Tr "packages.details.project_site"}}</a></div>{{end}}
    {{if .PackageDescriptor.Metadata.License}}<div class="item" title="{{ctx.Locale.Tr "packages.details.license"}}">{{svg "octicon-law" 16 "tw-mr-2"}} {{.PackageDescriptor.Metadata.License}}</div>{{end}}
{{end}}

@@ -37,6 +37,7 @@
    {{template "package/content/pub" .}}
    {{template "package/content/pypi" .}}
    {{template "package/content/rpm" .}}
    {{template "package/content/alt" .}}
    {{template "package/content/rubygems" .}}
    {{template "package/content/swift" .}}
    {{template "package/content/vagrant" .}}

@@ -68,6 +69,7 @@
    {{template "package/metadata/pub" .}}
    {{template "package/metadata/pypi" .}}
    {{template "package/metadata/rpm" .}}
    {{template "package/metadata/alt" .}}
    {{template "package/metadata/rubygems" .}}
    {{template "package/metadata/swift" .}}
    {{template "package/metadata/vagrant" .}}

658
tests/integration/api_packages_alt_test.go
Normal file
@@ -0,0 +1,658 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package integration

import (
    "bytes"
    "compress/gzip"
    "crypto/sha256"
    "encoding/base64"
    "encoding/binary"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "strings"
    "testing"

    "code.gitea.io/gitea/models/db"
    "code.gitea.io/gitea/models/packages"
    "code.gitea.io/gitea/models/unittest"
    user_model "code.gitea.io/gitea/models/user"
    packages_module "code.gitea.io/gitea/modules/packages"
    rpm_module "code.gitea.io/gitea/modules/packages/rpm"
    "code.gitea.io/gitea/modules/setting"
    "code.gitea.io/gitea/modules/util"
    "code.gitea.io/gitea/tests"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/ulikunitz/xz"
)

func TestPackageAlt(t *testing.T) {
    defer tests.PrepareTestEnv(t)()

    packageName := "gitea-test"
    packageVersion := "1.0.2-1"
    packageArchitecture := "x86_64"

    user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})

    base64RpmPackageContent := `H4sICFayB2QCAGdpdGVhLXRlc3QtMS4wLjItMS14ODZfNjQucnBtAO2YV4gTQRjHJzl7wbNhhxVF
VNwk2zd2PdvZ9Sxnd3Z3NllNsmF3o6congVFsWFHRWwIImIXfRER0QcRfPBJEXvvBQvWSfZTT0VQ
8TF/MuU33zcz3+zOJGEe73lyuQBRBWKWRzDrEddjuVAkxLMc+lsFUOWfm5bvvReAalWECg/TsivU
dyKa0U61aVnl6wj0Uxe4nc8F92hZiaYE8CO/P0r7/Quegr0c7M/AvoCaGZEIWNGUqMHrhhGROIUT
Zc7gOAOraoQzCNZ0WdU0HpEI5jiB4zlek3gT85wqCBomhomxoGCs8wImWMImbxqKgXVNUKKaqShR
STKVKK9glFUNcf2g+/t27xs16v5x/eyOKftVGlIhyiuvvPLKK6+88sorr7zyyiuvvPKCO5HPnz+v
pGVhhXsTsFVeSstuWR9anwU+Bk3Vch5wTwL3JkHg+8C1gR8A169wj1KdpobAj4HbAT+Be5VewE+h
fz/g52AvBX4N9vHAb4AnA7+F8ePAH8BuA38ELgf+BLzQ50oIeBlw0OdAOXAlP57AGuCsbwGtbgCu
DrwRuAb4bwau6T/PwFbgWsDXgWuD/y3gOmC/B1wI/Bi4AcT3Arih3z9YCNzI9w9m/YKUG4Nd9N9z
pSZgHwrcFPgccFt//OADGE+F/q+Ao+D/FrijzwV1gbv4/QvaAHcFDgF3B5aB+wB3Be7rz1dQCtwP
eDxwMcw3GbgU7AasdwzYE8DjwT4L/CeAvRx4IvBCYA3iWQds+FzpDjABfghsAj8BTgA/A/b8+StX
A84A1wKe5s9fuRB4JpzHZv55rL8a/Dv49vpn/PErR4BvQX8Z+Db4l2W5CH2/f0W5+1fEoeFDBzFp
rE/FMcK4mWQSOzN+aDOIqztW2rPsFKIyqh7sQERR42RVMSKihnzVHlQ8Ag0YLBYNEIajkhmuR5Io
7nlpt2M4nJs0ZNkoYaUyZahMlSfJImr1n1WjFVNCPCaTZgYNGdGL8YN2mX8WHfA/C7ViHJK0pxHG
SrkeTiSI4T+7ubf85yrzRCQRQ5EVxVAjvIBVRY/KRFAVReIkhfARSddNSceayQkGliIKb0q8RAxJ
5QWNVxHIsW3Pz369bw+5jh5y0klE9Znqm0dF57b0HbGy2A5lVUBTZZrqZjdUjYoprFmpsBtHP5d0
+ISltS2yk2mHuC4x+lgJMhgnidvuqy3b0suK0bm+tw3FMxI2zjm7/fA0MtQhplX2s7nYLZ2ZC0yg
CxJZDokhORTJlrlcCvG5OieGBERlVCs7CfuS6WzQ/T2j+9f92BWxTFEcp2IkYccYGp2LYySEfreq
irue4WRF5XkpKovw2wgpq2rZBI8bQZkzxEkiYaNwxnXCCVvHidzIiB3CM2yMYdNWmjDsaLovaE4c
x3a6mLaTxB7rEj3jWN4M2p7uwPaa1GfI8BHFfcZMKhkycnhR7y781/a+A4t7FpWWTupRUtKbegwZ
XMKwJinTSe70uhRcj55qNu3YHtE922Fdz7FTMTq9Q3TbMdiYrrPudMvT44S6u2miu138eC0tTN9D
2CFGHHtQsHHsGCRFDFbXuT9wx6mUTZfseydlkWZeJkW6xOgYjqXT+LA7I6XHaUx2xmUzqelWymA9
rCXI9+D1BHbjsITssqhBNysw0tOWjcpmIh6+aViYPfftw8ZSGfRVPUqKiosZj5R5qGmk/8AjjRbZ
d8b3vvngdPHx3HvMeCarIk7VVSwbgoZVkceEVyOmyUmGxBGNYDVKSFSOGlIkGqWnUZFkiY/wsmhK
Mu0UFYgZ/bYnuvn/vz4wtCz8qMwsHUvP0PX3tbYFUctAPdrY6tiiDtcCddDECahx7SuVNP5dpmb5
9tMDyaXb7OAlk5acuPn57ss9mw6Wym0m1Fq2cej7tUt2LL4/b8enXU2fndk+fvv57ndnt55/cQob
7tpp/pEjDS7cGPZ6BY430+7danDq6f42Nw49b9F7zp6BiKpJb9s5P0AYN2+L159cnrur636rx+v1
7ae1K28QbMMcqI8CqwIrgwg9nTOp8Oj9q81plUY7ZuwXN8Vvs8wbAAA=`
    rpmPackageContent, err := base64.StdEncoding.DecodeString(base64RpmPackageContent)
    require.NoError(t, err)

    zr, err := gzip.NewReader(bytes.NewReader(rpmPackageContent))
    require.NoError(t, err)

    content, err := io.ReadAll(zr)
    require.NoError(t, err)

    rootURL := fmt.Sprintf("/api/packages/%s/alt", user.Name)

    for _, group := range []string{"", "el9", "el9/stable"} {
        t.Run(fmt.Sprintf("[Group:%s]", group), func(t *testing.T) {
            var groupParts []string
            uploadURL := rootURL
            if group != "" {
                groupParts = strings.Split(group, "/")
                uploadURL = strings.Join(append([]string{rootURL}, groupParts...), "/")
            } else {
                groupParts = strings.Split("alt", "/")
            }
            groupURL := strings.Join(append([]string{rootURL}, groupParts...), "/")

            t.Run("RepositoryConfig", func(t *testing.T) {
                defer tests.PrintCurrentTest(t)()

                req := NewRequest(t, "GET", groupURL+".repo")
                resp := MakeRequest(t, req, http.StatusOK)

                expected := fmt.Sprintf(`[gitea-%s]
name=%s
baseurl=%s
enabled=1`,
                    strings.Join(append([]string{user.LowerName}, groupParts...), "-"),
                    strings.Join(append([]string{user.Name, setting.AppName}, groupParts...), " - "),
                    util.URLJoin(setting.AppURL, groupURL),
                )

                assert.Equal(t, expected, resp.Body.String())
            })

t.Run("Upload", func(t *testing.T) {
|
||||
url := uploadURL + "/upload"
|
||||
|
||||
req := NewRequestWithBody(t, "PUT", url, bytes.NewReader(content))
|
||||
MakeRequest(t, req, http.StatusUnauthorized)
|
||||
|
||||
req = NewRequestWithBody(t, "PUT", url, bytes.NewReader(content)).
|
||||
AddBasicAuth(user.Name)
|
||||
MakeRequest(t, req, http.StatusCreated)
|
||||
|
||||
pvs, err := packages.GetVersionsByPackageType(db.DefaultContext, user.ID, packages.TypeAlt)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pvs, 1)
|
||||
|
||||
pd, err := packages.GetPackageDescriptor(db.DefaultContext, pvs[0])
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, pd.SemVer)
|
||||
assert.IsType(t, &rpm_module.VersionMetadata{}, pd.Metadata)
|
||||
assert.Equal(t, packageName, pd.Package.Name)
|
||||
assert.Equal(t, packageVersion, pd.Version.Version)
|
||||
|
||||
pfs, err := packages.GetFilesByVersionID(db.DefaultContext, pvs[0].ID)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pfs, 1)
|
||||
assert.Equal(t, fmt.Sprintf("%s-%s.%s.rpm", packageName, packageVersion, packageArchitecture), pfs[0].Name)
|
||||
assert.True(t, pfs[0].IsLead)
|
||||
|
||||
pb, err := packages.GetBlobByID(db.DefaultContext, pfs[0].BlobID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(len(content)), pb.Size)
|
||||
|
||||
req = NewRequestWithBody(t, "PUT", url, bytes.NewReader(content)).
|
||||
AddBasicAuth(user.Name)
|
||||
MakeRequest(t, req, http.StatusConflict)
|
||||
})
|
||||
|
||||
t.Run("Download", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
|
||||
req := NewRequest(t, "GET", fmt.Sprintf("%s.repo/%s/RPMS.classic/%s-%s.%s.rpm", groupURL, packageArchitecture, packageName, packageVersion, packageArchitecture))
|
||||
resp := MakeRequest(t, req, http.StatusOK)
|
||||
|
||||
assert.Equal(t, content, resp.Body.Bytes())
|
||||
})
|
||||
|
||||
t.Run("Repository", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
|
||||
url := fmt.Sprintf("%s.repo/%s/base", groupURL, packageArchitecture)
|
||||
|
||||
req := NewRequest(t, "HEAD", url+"/dummy.xml")
|
||||
MakeRequest(t, req, http.StatusNotFound)
|
||||
|
||||
req = NewRequest(t, "GET", url+"/dummy.xml")
|
||||
MakeRequest(t, req, http.StatusNotFound)
|
||||
|
||||
t.Run("release.classic", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
|
||||
req = NewRequest(t, "HEAD", url+"/release.classic")
|
||||
MakeRequest(t, req, http.StatusOK)
|
||||
|
||||
req = NewRequest(t, "GET", url+"/release.classic")
|
||||
resp := MakeRequest(t, req, http.StatusOK).Body.String()
|
||||
|
||||
type ReleaseClassic struct {
|
||||
Archive string
|
||||
Component string
|
||||
Origin string
|
||||
Label string
|
||||
Architecture string
|
||||
NotAutomatic bool
|
||||
}
|
||||
|
||||
var result ReleaseClassic
|
||||
|
||||
lines := strings.Split(resp, "\n")
|
||||
|
||||
for _, line := range lines {
|
||||
parts := strings.SplitN(line, ": ", 2)
|
||||
if len(parts) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
switch parts[0] {
|
||||
case "Archive":
|
||||
result.Archive = parts[1]
|
||||
case "Component":
|
||||
result.Component = parts[1]
|
||||
case "Origin":
|
||||
result.Origin = parts[1]
|
||||
case "Label":
|
||||
result.Label = parts[1]
|
||||
case "Architecture":
|
||||
result.Architecture = parts[1]
|
||||
case "NotAutomatic":
|
||||
notAuto, err := strconv.ParseBool(parts[1])
|
||||
if err != nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
result.NotAutomatic = notAuto
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, "classic", result.Component)
|
||||
assert.Equal(t, "Alt Linux Team", result.Origin)
|
||||
assert.Equal(t, "Forgejo", result.Label)
|
||||
assert.Equal(t, "x86_64", result.Architecture)
|
||||
assert.False(t, result.NotAutomatic)
|
||||
assert.NotEmpty(t, result.Archive)
|
||||
})
|
||||
|
||||
t.Run("release", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
|
||||
req = NewRequest(t, "HEAD", url+"/release")
|
||||
MakeRequest(t, req, http.StatusOK)
|
||||
|
||||
req = NewRequest(t, "GET", url+"/release")
|
||||
resp := MakeRequest(t, req, http.StatusOK).Body.String()
|
||||
|
||||
type Checksum struct {
|
||||
Hash string
|
||||
Size int
|
||||
File string
|
||||
}
|
||||
|
||||
type Release struct {
|
||||
Origin string
|
||||
Label string
|
||||
Suite string
|
||||
Architectures string
|
||||
MD5Sum []Checksum
|
||||
BLAKE2B []Checksum
|
||||
}
|
||||
|
||||
var result Release
|
||||
|
||||
lines := strings.Split(resp, "\n")
|
||||
|
||||
var isMD5Sum, isBLAKE2b bool
|
||||
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(line, "Origin:"):
|
||||
result.Origin = strings.TrimSpace(strings.TrimPrefix(line, "Origin:"))
|
||||
case strings.HasPrefix(line, "Label:"):
|
||||
result.Label = strings.TrimSpace(strings.TrimPrefix(line, "Label:"))
|
||||
case strings.HasPrefix(line, "Suite:"):
|
||||
result.Suite = strings.TrimSpace(strings.TrimPrefix(line, "Suite:"))
|
||||
case strings.HasPrefix(line, "Architectures:"):
|
||||
result.Architectures = strings.TrimSpace(strings.TrimPrefix(line, "Architectures:"))
|
||||
case line == "MD5Sum:":
|
||||
isMD5Sum = true
|
||||
isBLAKE2b = false
|
||||
case line == "BLAKE2b:":
|
||||
isBLAKE2b = true
|
||||
isMD5Sum = false
|
||||
case isMD5Sum || isBLAKE2b:
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) >= 3 {
|
||||
hash := parts[0]
|
||||
size, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
file := parts[2]
|
||||
|
||||
checksum := Checksum{
|
||||
Hash: hash,
|
||||
Size: size,
|
||||
File: file,
|
||||
}
|
||||
|
||||
if isMD5Sum {
|
||||
result.MD5Sum = append(result.MD5Sum, checksum)
|
||||
} else if isBLAKE2b {
|
||||
result.BLAKE2B = append(result.BLAKE2B, checksum)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, "Forgejo", result.Origin)
|
||||
assert.Equal(t, "Forgejo", result.Label)
|
||||
assert.Equal(t, "Sisyphus", result.Suite)
|
||||
assert.Equal(t, "x86_64", result.Architectures)
|
||||
|
||||
assert.Len(t, result.MD5Sum, 3)
|
||||
assert.Equal(t, "bbf7ae6b2f540673ed1cfc0266b5f319", result.MD5Sum[0].Hash)
|
||||
assert.Equal(t, 1003, result.MD5Sum[0].Size)
|
||||
assert.Equal(t, "base/pkglist.classic", result.MD5Sum[0].File)
|
||||
|
||||
assert.Len(t, result.BLAKE2B, 3)
|
||||
assert.Equal(t, "b527bf038895ce29107ec3a6d2eebd7c365e8ce5ab767276eeddd7c549a159025225cb0ecfdbf7b71da13db7e865e77bcb0e2dae4d21335df01a4a17e0056a70", result.BLAKE2B[0].Hash)
|
||||
assert.Equal(t, 1003, result.BLAKE2B[0].Size)
|
||||
assert.Equal(t, "base/pkglist.classic", result.BLAKE2B[0].File)
|
||||
})
|
||||
|
||||
t.Run("pkglist.classic", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
|
||||
req = NewRequest(t, "GET", url+"/pkglist.classic")
|
||||
resp := MakeRequest(t, req, http.StatusOK)
|
||||
|
||||
body := resp.Body
|
||||
defer body.Reset()
|
||||
|
||||
type RpmHeader struct {
|
||||
Magic [8]byte
|
||||
Nindex uint32
|
||||
Hsize uint32
|
||||
}
|
||||
|
||||
type RpmHdrIndex struct {
|
||||
Tag uint32
|
||||
Type uint32
|
||||
Offset uint32
|
||||
Count uint32
|
||||
}
|
||||
|
||||
type Metadata struct {
|
||||
Name string
|
||||
Version string
|
||||
Release string
|
||||
Summary []string
|
||||
Description []string
|
||||
BuildTime int
|
||||
Size int
|
||||
License string
|
||||
Packager string
|
||||
Group []string
|
||||
URL string
|
||||
Arch string
|
||||
SourceRpm string
|
||||
ProvideNames []string
|
||||
RequireFlags []int
|
||||
RequireNames []string
|
||||
RequireVersions []string
|
||||
ChangeLogTimes []int
|
||||
ChangeLogNames []string
|
||||
ChangeLogTexts []string
|
||||
ProvideFlags []int
|
||||
ProvideVersions []string
|
||||
DirIndexes []int
|
||||
BaseNames []string
|
||||
DirNames []string
|
||||
DistTag string
|
||||
AptIndexLegacyFileName string
|
||||
AptIndexLegacyFileSize int
|
||||
MD5Sum string
|
||||
BLAKE2B string
|
||||
AptIndexLegacyDirectory string
|
||||
}
|
||||
|
||||
var result Metadata
|
||||
|
||||
const rpmHeaderMagic = "\x8e\xad\xe8\x01\x00\x00\x00\x00"
|
||||
|
||||
var hdr RpmHeader
|
||||
for {
|
||||
if err := binary.Read(body, binary.BigEndian, &hdr); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(hdr.Magic[:], []byte(rpmHeaderMagic)) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
nindex := hdr.Nindex
|
||||
index := make([]RpmHdrIndex, nindex)
|
||||
if err := binary.Read(body, binary.BigEndian, &index); err != nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
data := make([]byte, hdr.Hsize)
|
||||
if err := binary.Read(body, binary.BigEndian, &data); err != nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var indexPtrs []*RpmHdrIndex
|
||||
for i := range index {
|
||||
indexPtrs = append(indexPtrs, &index[i])
|
||||
}
|
||||
|
||||
for _, idx := range indexPtrs {
|
||||
tag := binary.BigEndian.Uint32([]byte{byte(idx.Tag >> 24), byte(idx.Tag >> 16), byte(idx.Tag >> 8), byte(idx.Tag)})
|
||||
typ := binary.BigEndian.Uint32([]byte{byte(idx.Type >> 24), byte(idx.Type >> 16), byte(idx.Type >> 8), byte(idx.Type)})
|
||||
offset := binary.BigEndian.Uint32([]byte{byte(idx.Offset >> 24), byte(idx.Offset >> 16), byte(idx.Offset >> 8), byte(idx.Offset)})
|
||||
count := binary.BigEndian.Uint32([]byte{byte(idx.Count >> 24), byte(idx.Count >> 16), byte(idx.Count >> 8), byte(idx.Count)})
|
||||
|
||||
if typ == 6 || typ == 8 || typ == 9 {
|
||||
elem := data[offset:]
|
||||
for j := uint32(0); j < count; j++ {
|
||||
strEnd := bytes.IndexByte(elem, 0)
|
||||
if strEnd == -1 {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
switch tag {
|
||||
case 1000:
|
||||
result.Name = string(elem[:strEnd])
|
||||
case 1001:
|
||||
result.Version = string(elem[:strEnd])
|
||||
case 1002:
|
||||
result.Release = string(elem[:strEnd])
|
||||
case 1004:
|
||||
var summaries []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
summaries = append(summaries, string(elem[:strEnd]))
|
||||
}
|
||||
result.Summary = summaries
|
||||
case 1005:
|
||||
var descriptions []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
descriptions = append(descriptions, string(elem[:strEnd]))
|
||||
}
|
||||
result.Description = descriptions
|
||||
case 1014:
|
||||
result.License = string(elem[:strEnd])
|
||||
case 1015:
|
||||
result.Packager = string(elem[:strEnd])
|
||||
case 1016:
|
||||
var groups []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
groups = append(groups, string(elem[:strEnd]))
|
||||
}
|
||||
result.Group = groups
|
||||
case 1020:
|
||||
result.URL = string(elem[:strEnd])
|
||||
case 1022:
|
||||
result.Arch = string(elem[:strEnd])
|
||||
case 1044:
|
||||
result.SourceRpm = string(elem[:strEnd])
|
||||
case 1047:
|
||||
var provideNames []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
provideNames = append(provideNames, string(elem[:strEnd]))
|
||||
}
|
||||
result.ProvideNames = provideNames
|
||||
case 1049:
|
||||
var requireNames []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
requireNames = append(requireNames, string(elem[:strEnd]))
|
||||
}
|
||||
result.RequireNames = requireNames
|
||||
case 1050:
|
||||
var requireVersions []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
requireVersions = append(requireVersions, string(elem[:strEnd]))
|
||||
}
|
||||
result.RequireVersions = requireVersions
|
||||
case 1081:
|
||||
var changeLogNames []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
changeLogNames = append(changeLogNames, string(elem[:strEnd]))
|
||||
}
|
||||
result.ChangeLogNames = changeLogNames
|
||||
case 1082:
|
||||
var changeLogTexts []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
changeLogTexts = append(changeLogTexts, string(elem[:strEnd]))
|
||||
}
|
||||
result.ChangeLogTexts = changeLogTexts
|
||||
case 1113:
|
||||
var provideVersions []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
provideVersions = append(provideVersions, string(elem[:strEnd]))
|
||||
}
|
||||
result.ProvideVersions = provideVersions
|
||||
case 1117:
|
||||
var baseNames []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
baseNames = append(baseNames, string(elem[:strEnd]))
|
||||
}
|
||||
result.BaseNames = baseNames
|
||||
case 1118:
|
||||
var dirNames []string
|
||||
for i := uint32(0); i < count; i++ {
|
||||
dirNames = append(dirNames, string(elem[:strEnd]))
|
||||
}
|
||||
result.DirNames = dirNames
|
||||
case 1155:
|
||||
result.DistTag = string(elem[:strEnd])
|
||||
case 1000000:
|
||||
result.AptIndexLegacyFileName = string(elem[:strEnd])
|
||||
case 1000005:
|
||||
result.MD5Sum = string(elem[:strEnd])
|
||||
case 1000009:
|
||||
result.BLAKE2B = string(elem[:strEnd])
|
||||
case 1000010:
|
||||
result.AptIndexLegacyDirectory = string(elem[:strEnd])
|
||||
}
|
||||
elem = elem[strEnd+1:]
|
||||
}
|
||||
} else if typ == 4 {
|
||||
elem := data[offset:]
|
||||
for j := uint32(0); j < count; j++ {
|
||||
val := binary.BigEndian.Uint32(elem)
|
||||
switch tag {
|
||||
case 1006:
|
||||
result.BuildTime = int(val)
|
||||
case 1009:
|
||||
result.Size = int(val)
|
||||
case 1048:
|
||||
var requireFlags []int
|
||||
for i := uint32(0); i < count; i++ {
|
||||
requireFlags = append(requireFlags, int(val))
|
||||
}
|
||||
result.RequireFlags = requireFlags
|
||||
case 1080:
|
||||
var changeLogTimes []int
|
||||
for i := uint32(0); i < count; i++ {
|
||||
changeLogTimes = append(changeLogTimes, int(val))
|
||||
}
|
||||
result.ChangeLogTimes = changeLogTimes
|
||||
case 1112:
|
||||
var provideFlags []int
|
||||
for i := uint32(0); i < count; i++ {
|
||||
provideFlags = append(provideFlags, int(val))
|
||||
}
|
||||
result.ProvideFlags = provideFlags
|
||||
case 1116:
|
||||
var dirIndexes []int
|
||||
for i := uint32(0); i < count; i++ {
|
||||
dirIndexes = append(dirIndexes, int(val))
|
||||
}
|
||||
result.DirIndexes = dirIndexes
|
||||
case 1000001:
|
||||
result.AptIndexLegacyFileSize = int(val)
|
||||
}
|
||||
elem = elem[4:]
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
assert.Equal(t, "gitea-test", result.Name)
|
||||
assert.Equal(t, "1.0.2", result.Version)
|
||||
assert.Equal(t, "1", result.Release)
|
||||
assert.Equal(t, []string{"RPM package summary"}, result.Summary)
|
||||
assert.Equal(t, []string{"RPM package description"}, result.Description)
|
||||
assert.Equal(t, 1678225964, result.BuildTime)
|
||||
assert.Equal(t, 13, result.Size)
|
||||
assert.Equal(t, "MIT", result.License)
|
||||
assert.Equal(t, "KN4CK3R", result.Packager)
|
||||
assert.Equal(t, []string{"System"}, result.Group)
|
||||
assert.Equal(t, "https://gitea.io", result.URL)
|
||||
assert.Equal(t, "x86_64", result.Arch)
|
||||
assert.Equal(t, "gitea-test-1.0.2-1.src.rpm", result.SourceRpm)
|
||||
assert.Equal(t, []string{"", ""}, result.ProvideNames)
|
||||
assert.Equal(t, []int{16777226, 16777226, 16777226, 16777226, 16777226, 16777226, 16777226}, result.RequireFlags)
|
||||
assert.Equal(t, []string{"", "", "", "", "", "", ""}, result.RequireNames)
|
||||
assert.Equal(t, []string{"5.2-1", "5.2-1", "5.2-1", "5.2-1", "5.2-1", "5.2-1", "5.2-1"}, result.RequireVersions)
|
||||
assert.Equal(t, []int{1678276800}, result.ChangeLogTimes)
|
||||
assert.Equal(t, []string{"KN4CK3R <dummy@gitea.io>"}, result.ChangeLogNames)
|
||||
assert.Equal(t, []string{"- Changelog message."}, result.ChangeLogTexts)
|
||||
assert.Equal(t, []int{8, 8}, result.ProvideFlags)
|
||||
assert.Equal(t, []string{"1.0.2-1", "1.0.2-1"}, result.ProvideVersions)
|
||||
assert.Equal(t, []int(nil), result.DirIndexes)
|
||||
assert.Equal(t, []string{"hello"}, result.BaseNames)
|
||||
assert.Equal(t, []string{"/usr/local/bin/"}, result.DirNames)
|
||||
assert.Equal(t, "", result.DistTag)
|
||||
assert.Equal(t, "gitea-test-1.0.2-1.x86_64.rpm", result.AptIndexLegacyFileName)
|
||||
assert.Equal(t, 7116, result.AptIndexLegacyFileSize)
|
||||
assert.Equal(t, "9ea82dd62968719aea19c08cd2ced79a", result.MD5Sum)
|
||||
assert.Equal(t, "8ba7f1f52a47b23997aa2de21b305cc71974d51f0c54fb53cb927156284dafdcc233d514a46c020e4a0666e218529e0284933c5873d24c2555830d7627140f7d", result.BLAKE2B)
|
||||
assert.Equal(t, "RPMS.classic", result.AptIndexLegacyDirectory)
|
||||
})
|
||||
|
||||
t.Run("pkglist.classic.xz", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
|
||||
req := NewRequest(t, "GET", url+"/pkglist.classic.xz")
|
||||
pkglistXZResp := MakeRequest(t, req, http.StatusOK)
|
||||
pkglistXZ := pkglistXZResp.Body
|
||||
defer pkglistXZ.Reset()
|
||||
|
||||
req2 := NewRequest(t, "GET", url+"/pkglist.classic")
|
||||
pkglistResp := MakeRequest(t, req2, http.StatusOK)
|
||||
pkglist := pkglistResp.Body
|
||||
defer pkglist.Reset()
|
||||
|
||||
assert.Less(t, pkglistXZ.Len(), pkglist.Len())
|
||||
|
||||
xzReader, err := xz.NewReader(pkglistXZ)
|
||||
require.NoError(t, err)
|
||||
|
||||
var unxzData bytes.Buffer
|
||||
_, err = io.Copy(&unxzData, xzReader)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, unxzData.Len(), pkglist.Len())
|
||||
|
||||
content, _ := packages_module.NewHashedBuffer()
|
||||
defer content.Close()
|
||||
|
||||
h := sha256.New()
|
||||
w := io.MultiWriter(content, h)
|
||||
|
||||
_, err = io.Copy(w, pkglist)
|
||||
require.NoError(t, err)
|
||||
|
||||
hashMD5Classic, _, hashSHA256Classic, _, hashBlake2bClassic := content.Sums()
|
||||
|
||||
contentUnxz, _ := packages_module.NewHashedBuffer()
|
||||
defer contentUnxz.Close()
|
||||
|
||||
_, err = io.Copy(io.MultiWriter(contentUnxz, sha256.New()), &unxzData)
|
||||
require.NoError(t, err)
|
||||
|
||||
hashMD5Unxz, _, hashSHA256Unxz, _, hashBlake2bUnxz := contentUnxz.Sums()
|
||||
|
||||
assert.Equal(t, fmt.Sprintf("%x", hashSHA256Classic), fmt.Sprintf("%x", hashSHA256Unxz))
|
||||
assert.Equal(t, fmt.Sprintf("%x", hashBlake2bClassic), fmt.Sprintf("%x", hashBlake2bUnxz))
|
||||
assert.Equal(t, fmt.Sprintf("%x", hashMD5Classic), fmt.Sprintf("%x", hashMD5Unxz))
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Delete", func(t *testing.T) {
|
||||
defer tests.PrintCurrentTest(t)()
|
||||
|
||||
req := NewRequest(t, "DELETE", fmt.Sprintf("%s.repo/%s/RPMS.classic/%s-%s.%s.rpm", groupURL, packageArchitecture, packageName, packageVersion, packageArchitecture))
|
||||
MakeRequest(t, req, http.StatusUnauthorized)
|
||||
|
||||
req = NewRequest(t, "DELETE", fmt.Sprintf("%s.repo/%s/RPMS.classic/%s-%s.%s.rpm", groupURL, packageArchitecture, packageName, packageVersion, packageArchitecture)).
|
||||
AddBasicAuth(user.Name)
|
||||
MakeRequest(t, req, http.StatusNoContent)
|
||||
|
||||
pvs, err := packages.GetVersionsByPackageType(db.DefaultContext, user.ID, packages.TypeAlt)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, pvs)
|
||||
req = NewRequest(t, "DELETE", fmt.Sprintf("%s.repo/%s/RPMS.classic/%s-%s.%s.rpm", groupURL, packageArchitecture, packageName, packageVersion, packageArchitecture)).
|
||||
AddBasicAuth(user.Name)
|
||||
MakeRequest(t, req, http.StatusNotFound)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
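Outside the test harness, the same flow is plain HTTP. A hedged client sketch of the upload the test exercises (host, user, credentials, and file name are placeholders):

package main

import (
    "bytes"
    "fmt"
    "net/http"
    "os"
)

func main() {
    pkg, err := os.ReadFile("gitea-test-1.0.2-1.x86_64.rpm")
    if err != nil {
        panic(err)
    }

    // PUT /api/packages/{owner}/alt/upload with HTTP basic auth.
    req, err := http.NewRequest("PUT", "https://forgejo.example.com/api/packages/user2/alt/upload", bytes.NewReader(pkg))
    if err != nil {
        panic(err)
    }
    req.SetBasicAuth("user2", "password")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status) // 201 Created on first upload, 409 Conflict on re-upload
}
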
1
web_src/svg/gitea-alt.svg
Normal file
File diff suppressed because one or more lines are too long (image, 12 KiB)