// Package cache provides a content-addressed artifact and metadata cache for
// downloaded files. It is organized in three internal layers:
//
// - store: content-addressed blob IO (read/write/ingest/remove)
// - index: versioned manifest tracking cache entries with v1→v2 migration
// - policy: per-kind (artifact vs metadata) TTL and size-limit enforcement
//
// The primary consumer-facing API is [util.CachedDownload], which wraps HTTP
// downloads with streaming hash verification, cache storage, and optional
// progress bar injection via DownloadOptions.WrapReader.
//
// Cache entries are keyed by URL and classified as either KindArtifact (JARs,
// binaries — long TTL) or KindMetadata (version manifests, API responses —
// short TTL). Integrity verification is performed inline during download when
// an expected hash is provided.
package cache
import "sync"
var (
networkOnce sync.Once
networkHandler *handler
)
func Network() *handler {
networkOnce.Do(func() {
networkHandler = newHandler("network", DefaultCacheConfig())
})
return networkHandler
}
package cache
import (
"fmt"
"os"
"sync"
"time"
"github.com/mclucy/lucy/logger"
)
type handler struct {
mu sync.RWMutex
on bool
dir string
store *store
index *index
policy Policy
}
func newHandler(name string, cfg CacheConfig) (obj *handler) {
dir := setDir(name)
obj = &handler{
on: cfg.Enabled,
dir: dir,
store: newStore(dir),
policy: cfg.toPolicy(),
}
if !obj.on {
return obj
}
if err := os.MkdirAll(obj.dir, 0o700); err != nil {
logger.Warn(
fmt.Errorf(
"cannot create cache directory, disabling %s cache: %w",
name, err,
),
)
obj.on = false
return obj
}
idx := newIndex(fmt.Sprintf("%s/%s", obj.dir, manifestFilename))
if !idx.load() {
obj.on = false
return obj
}
obj.index = idx
if obj.on {
obj.clearExpiredCache()
obj.maintainCacheLimit()
if err := obj.index.flush(); err != nil {
logger.Warn(
fmt.Errorf(
"failed to update index on initialization: %w",
err,
),
)
}
}
return obj
}
func (h *handler) Add(
data []byte,
filename string,
k string,
expiration time.Duration,
) error {
if expiration == 0 {
expiration = h.policy.Artifact.TTL
}
return h.AddEntry(
data,
filename,
k,
KindArtifact,
Integrity{State: IntegrityUnverified},
expiration,
)
}
func (h *handler) AddEntry(
data []byte,
filename string,
k string,
kind EntryKind,
integrity Integrity,
expiration time.Duration,
) error {
h.mu.Lock()
defer h.mu.Unlock()
if !h.on {
return nil
}
if expiration == 0 {
expiration = h.policy.ConfigFor(kind).TTL
}
ckey := canonicalizeKey(k)
contentHash := hash(data)
if filename == "" {
filename = contentHash
}
filename = sanitizeFilename(filename, contentHash)
if existing, ok := h.index.get(ckey); ok {
if existing.ContentHash == contentHash {
return nil
}
_ = h.store.Remove(existing.ContentHash)
h.index.delete(ckey)
}
if err := h.store.Write(contentHash, filename, data); err != nil {
return err
}
logger.Debug(
fmt.Sprintf(
"cache store: %s (%s, %s)",
k,
kind,
integrity.State,
),
)
h.index.put(
ckey, &CacheEntry{
Kind: kind,
Filename: filename,
Size: int64(len(data)),
ContentHash: contentHash,
Integrity: integrity,
Expiration: time.Now().Add(expiration),
Key: string(ckey),
CreatedAt: time.Now(),
},
)
if err := h.index.flush(); err != nil {
logger.Warn(
fmt.Errorf("failed to update index after adding item: %w", err),
)
}
return nil
}
// IngestEntry is a file-path variant of AddEntry for large files that should
// not be loaded into memory. The source file at srcPath is moved into the
// content-addressed store; contentHash must be pre-computed by the caller.
func (h *handler) IngestEntry(
srcPath string,
filename string,
k string,
size int64,
contentHash string,
kind EntryKind,
integrity Integrity,
expiration time.Duration,
) error {
h.mu.Lock()
defer h.mu.Unlock()
if !h.on {
return nil
}
if expiration == 0 {
expiration = h.policy.ConfigFor(kind).TTL
}
ckey := canonicalizeKey(k)
if filename == "" {
filename = contentHash
}
filename = sanitizeFilename(filename, contentHash)
if existing, ok := h.index.get(ckey); ok {
if existing.ContentHash == contentHash {
return nil
}
_ = h.store.Remove(existing.ContentHash)
h.index.delete(ckey)
}
if err := h.store.Ingest(contentHash, filename, srcPath); err != nil {
return err
}
logger.Debug(
fmt.Sprintf(
"cache ingest: %s (%s, %s)",
k,
kind,
integrity.State,
),
)
h.index.put(
ckey, &CacheEntry{
Kind: kind,
Filename: filename,
Size: size,
ContentHash: contentHash,
Integrity: integrity,
Expiration: time.Now().Add(expiration),
Key: string(ckey),
CreatedAt: time.Now(),
},
)
if err := h.index.flush(); err != nil {
logger.Warn(
fmt.Errorf("failed to update index after ingesting item: %w", err),
)
}
return nil
}
func (h *handler) Flush() error {
h.mu.Lock()
defer h.mu.Unlock()
if !h.on {
return nil
}
return h.index.flush()
}
func (h *handler) Exist(k string) bool {
h.mu.RLock()
defer h.mu.RUnlock()
return h.existLocked(k)
}
func (h *handler) existLocked(k string) bool {
if !h.on {
return false
}
return h.index.exists(canonicalizeKey(k))
}
func (h *handler) Get(k string) (hit bool, file *os.File, err error) {
h.mu.RLock()
defer h.mu.RUnlock()
if !h.on {
return false, nil, nil
}
ckey := canonicalizeKey(k)
entry, ok := h.index.get(ckey)
if !ok {
logger.Debug("cache miss: " + k)
return false, nil, nil
}
file, err = h.store.Read(entry.ContentHash, entry.Filename)
if err != nil {
return false, nil, err
}
logger.Debug("cache hit: " + k)
return true, file, nil
}
func (h *handler) GetBytes(k string) (hit bool, data []byte, err error) {
h.mu.RLock()
defer h.mu.RUnlock()
if !h.on {
return false, nil, nil
}
ckey := canonicalizeKey(k)
entry, ok := h.index.get(ckey)
if !ok {
logger.Debug("cache miss: " + k)
return false, nil, nil
}
data, err = h.store.ReadBytes(entry.ContentHash, entry.Filename)
if err != nil {
return false, nil, err
}
logger.Debug("cache hit: " + k)
return true, data, nil
}
func (h *handler) Remove(k string) error {
h.mu.Lock()
defer h.mu.Unlock()
return h.removeLocked(canonicalizeKey(k))
}
func (h *handler) removeLocked(k key) error {
if err := h.removeEntryLocked(k); err != nil {
return err
}
if err := h.index.flush(); err != nil {
logger.Warn(
fmt.Errorf("failed to update index after removing item: %w", err),
)
}
return nil
}
func (h *handler) removeEntryLocked(k key) error {
if !h.on {
return nil
}
entry, ok := h.index.get(k)
if !ok {
return nil
}
if err := h.store.Remove(entry.ContentHash); err != nil {
return err
}
h.index.delete(k)
return nil
}
func (h *handler) All() []*CacheEntry {
h.mu.RLock()
defer h.mu.RUnlock()
if !h.on {
return nil
}
entries := h.index.all()
result := make([]*CacheEntry, 0, len(entries))
for _, entry := range entries {
result = append(result, entry)
}
return result
}
func (h *handler) ClearAll() (report ResetReport, err error) {
h.mu.Lock()
defer h.mu.Unlock()
if !h.on {
return ResetReport{}, nil
}
if report, err = resetCache(h.index.path, true); err != nil {
return ResetReport{}, fmt.Errorf("failed to clear cache: %w", err)
}
logger.Info("cache cleared")
idx := newIndex(h.index.path)
if !idx.create() {
return ResetReport{}, fmt.Errorf("failed to create new index after clearing cache")
}
h.index = idx
return report, nil
}
package cache
import (
"errors"
"fmt"
"os"
"path"
"path/filepath"
"github.com/mclucy/lucy/logger"
)
const (
manifestFilename = "cache.json"
)
type key string
type ResetReport struct {
TotalFreedSize int64
FileCount int
}
func resetCache(manifestPath string, verbose bool) (ResetReport, error) {
err := os.Remove(manifestPath)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return ResetReport{}, err
}
cacheDir := path.Dir(manifestPath)
entries, err := os.ReadDir(cacheDir)
if err != nil {
return ResetReport{}, err
}
var report ResetReport
for _, entry := range entries {
if entry.Name() == manifestFilename {
continue
}
entryPath := path.Join(cacheDir, entry.Name())
var size int64
size, err = calculateSize(entryPath)
if err != nil {
logger.Debug(
fmt.Sprintf(
"failed to calculate size for %s: %v",
entryPath,
err,
),
)
size = 0
}
if err := os.RemoveAll(entryPath); err != nil {
logger.Warn(
fmt.Errorf(
"failed to remove cache item %s: %w",
entryPath, err,
),
)
} else if verbose {
logger.ShowInfo(fmt.Sprintf("removed %s", entryPath))
}
report.TotalFreedSize += size
report.FileCount++
}
return report, nil
}
// calculateSize recursively calculates the total size of a file or directory
func calculateSize(filePath string) (int64, error) {
var totalSize int64
err := filepath.Walk(
filePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
totalSize += info.Size()
}
return nil
},
)
return totalSize, err
}
package cache
import (
"crypto/sha256"
"fmt"
"net/url"
"os"
"path"
"sort"
"strings"
"time"
"github.com/mclucy/lucy/logger"
)
var hash = func(data []byte) string { return fmt.Sprintf("%x", sha256.Sum256(data)) }
func setDir(name string) string {
dir, err := os.UserCacheDir()
if err != nil {
dir = os.TempDir()
}
return path.Join(dir, "lucy", name)
}
func (h *handler) clearExpiredCache() {
expired := expiredEntries(h.index.all(), time.Now())
for _, k := range expired {
logger.Info("removing expired cache item " + k)
if err := h.removeEntryLocked(k); err != nil {
continue
}
}
}
// expiredEntries returns keys of all expired entries.
func expiredEntries(entries map[key]*CacheEntry, now time.Time) []key {
var expired []key
for k, entry := range entries {
if entry.Expiration.Before(now) {
expired = append(expired, k)
}
}
return expired
}
func (h *handler) maintainCacheLimit() {
evicted := evictionCandidates(h.index.all(), h.policy)
for _, e := range evicted {
logger.Info("removing cache item " + e.key)
if err := h.removeEntryLocked(e.key); err != nil {
continue
}
}
}
type evictionTarget struct {
key key
kind EntryKind
size int64
exp time.Time
}
func evictionTargets(entries map[key]*CacheEntry) (map[EntryKind]int64, []evictionTarget) {
totals := map[EntryKind]int64{}
var targets []evictionTarget
for k, entry := range entries {
totals[entry.Kind] += entry.Size
targets = append(targets, evictionTarget{
key: k,
kind: entry.Kind,
size: entry.Size,
exp: entry.Expiration,
})
}
sort.Slice(targets, func(i, j int) bool {
return targets[i].exp.Before(targets[j].exp)
})
return totals, targets
}
func evictionCandidates(entries map[key]*CacheEntry, policy Policy) []evictionTarget {
totals, targets := evictionTargets(entries)
var result []evictionTarget
for _, e := range targets {
limit := policy.ConfigFor(e.kind).MaxSize
if totals[e.kind] <= limit {
continue
}
result = append(result, e)
totals[e.kind] -= e.size
}
return result
}
func canonicalizeKey(k string) key {
u, err := url.Parse(k)
if err != nil || u.Scheme == "" {
return key(k)
}
u.Scheme = strings.ToLower(u.Scheme)
u.Host = strings.ToLower(u.Host)
host := u.Hostname()
port := u.Port()
if (u.Scheme == "http" && port == "80") || (u.Scheme == "https" && port == "443") {
u.Host = host
}
if u.Path != "" {
u.Path = path.Clean(u.Path)
}
if u.RawQuery != "" {
params := u.Query()
keys := make([]string, 0, len(params))
for k := range params {
keys = append(keys, k)
}
sort.Strings(keys)
var parts []string
for _, k := range keys {
vals := params[k]
sort.Strings(vals)
for _, v := range vals {
parts = append(parts, url.QueryEscape(k)+"="+url.QueryEscape(v))
}
}
u.RawQuery = strings.Join(parts, "&")
}
u.Fragment = ""
u.RawFragment = ""
return key(u.String())
}
package cache
import (
"fmt"
"time"
)
type CacheConfig struct {
Enabled bool `json:"enabled"`
DownloadMaxSize int64 `json:"download_max_size"`
DownloadKeepFor time.Duration `json:"download_keep_for"`
IndexMaxSize int64 `json:"index_max_size"`
IndexRefreshAfter time.Duration `json:"index_refresh_after"`
}
func DefaultCacheConfig() CacheConfig {
return CacheConfig{
Enabled: true,
DownloadMaxSize: 2 * 1024 * 1024 * 1024, // 2 GB
DownloadKeepFor: 7 * 24 * time.Hour, // 7 days
IndexMaxSize: 50 * 1024 * 1024, // 50 MB
IndexRefreshAfter: 4 * time.Hour, // 4 hours
}
}
func (c *CacheConfig) Validate() error {
if !c.Enabled {
return nil
}
if c.DownloadMaxSize <= 0 {
return fmt.Errorf("download_max_size must be positive, got %d", c.DownloadMaxSize)
}
if c.DownloadKeepFor <= 0 {
return fmt.Errorf("download_keep_for must be positive, got %s", c.DownloadKeepFor)
}
if c.IndexMaxSize <= 0 {
return fmt.Errorf("index_max_size must be positive, got %d", c.IndexMaxSize)
}
if c.IndexRefreshAfter <= 0 {
return fmt.Errorf("index_refresh_after must be positive, got %s", c.IndexRefreshAfter)
}
return nil
}
func (c *CacheConfig) toPolicy() Policy {
return Policy{
Metadata: PolicyConfig{
MaxSize: c.IndexMaxSize,
TTL: c.IndexRefreshAfter,
},
Artifact: PolicyConfig{
MaxSize: c.DownloadMaxSize,
TTL: c.DownloadKeepFor,
},
}
}
package cache
import "time"
// EntryKind distinguishes artifact entries (large, immutable binaries like
// server JARs and mods) from metadata entries (small, frequently refreshed
// manifests and version indexes).
type EntryKind uint8
const (
KindMetadata EntryKind = iota
KindArtifact
)
func (k EntryKind) String() string {
switch k {
case KindMetadata:
return "metadata"
case KindArtifact:
return "artifact"
default:
return "unknown"
}
}
// HashAlgorithm identifies the hash function used for integrity verification.
// Different upstream sources provide different algorithms: Mojang uses SHA-1,
// Modrinth provides SHA-1 and SHA-512, and Lucy uses SHA-256 internally for
// content addressing.
type HashAlgorithm uint8
const (
HashNone HashAlgorithm = iota
HashSHA1 // Mojang-provided hashes
HashSHA256 // Internal content addressing
HashSHA512 // Modrinth-provided hashes
)
func (h HashAlgorithm) String() string {
switch h {
case HashSHA1:
return "sha1"
case HashSHA256:
return "sha256"
case HashSHA512:
return "sha512"
default:
return "none"
}
}
// ParseHashAlgorithm converts a string representation to HashAlgorithm.
// Returns HashNone for unrecognized inputs.
func ParseHashAlgorithm(s string) HashAlgorithm {
switch s {
case "sha1":
return HashSHA1
case "sha256":
return HashSHA256
case "sha512":
return HashSHA512
default:
return HashNone
}
}
// IntegrityState tracks whether a cached entry's content has been verified
// against an expected digest from an upstream source.
type IntegrityState uint8
const (
// IntegrityUnverified means the entry was cached without a known-good
// digest to compare against, or verification has not yet occurred.
IntegrityUnverified IntegrityState = iota
// IntegrityVerified means the entry's content matched the expected
// digest at cache-add time.
IntegrityVerified
)
func (s IntegrityState) String() string {
switch s {
case IntegrityUnverified:
return "unverified"
case IntegrityVerified:
return "verified"
default:
return "unknown"
}
}
// Integrity holds the expected or actual digest of a cache entry along with
// its verification state. When Expected is empty, the entry operates in
// unverified mode (best-effort caching without integrity guarantees).
type Integrity struct {
Algorithm HashAlgorithm `json:"algorithm"`
Expected string `json:"expected,omitempty"`
Actual string `json:"actual,omitempty"`
State IntegrityState `json:"state"`
}
// CacheEntry is the enriched metadata record for a single cached blob.
// ContentHash (always SHA-256) is used for content-addressed storage.
// Integrity tracks the upstream-provided digest which may use a different
// algorithm (SHA-1 for Mojang, SHA-512 for Modrinth).
type CacheEntry struct {
Kind EntryKind `json:"kind"`
Filename string `json:"filename"`
Size int64 `json:"size"`
ContentHash string `json:"content_hash"`
Integrity Integrity `json:"integrity"`
Expiration time.Time `json:"expiration"`
Key string `json:"key"`
CreatedAt time.Time `json:"created_at"`
}
package cache
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
)
const indexVersion = 2
type indexManifest struct {
Version int `json:"version"`
Entries map[key]*CacheEntry `json:"entries"`
}
type index struct {
path string
entries map[key]*CacheEntry
}
func newIndex(manifestPath string) *index {
idx := &index{
path: manifestPath,
entries: make(map[key]*CacheEntry),
}
return idx
}
func (idx *index) load() bool {
file, err := os.Open(idx.path)
if errors.Is(err, os.ErrNotExist) {
return idx.create()
} else if err != nil {
return false
}
defer tools.CloseReader(file, logger.Warn)
data, err := io.ReadAll(file)
if err != nil {
_, _ = resetCache(idx.path, false)
return idx.create()
}
if idx.tryLoadV2(data) {
return true
}
_, _ = resetCache(idx.path, false)
return idx.create()
}
func (idx *index) tryLoadV2(data []byte) bool {
var m indexManifest
if err := json.Unmarshal(data, &m); err != nil {
return false
}
if m.Version != indexVersion || m.Entries == nil {
return false
}
idx.entries = m.Entries
return true
}
func (idx *index) create() bool {
dir := filepath.Dir(idx.path)
if err := os.MkdirAll(dir, 0o700); err != nil {
logger.Warn(
fmt.Errorf(
"failed to create index directory %s: %w",
dir,
err,
),
)
return false
}
idx.entries = make(map[key]*CacheEntry)
if err := idx.flush(); err != nil {
logger.Warn(fmt.Errorf("failed to write initial index: %w", err))
return false
}
return true
}
func (idx *index) flush() error {
m := indexManifest{
Version: indexVersion,
Entries: idx.entries,
}
data, err := json.Marshal(m)
if err != nil {
return fmt.Errorf("failed to marshal index: %w", err)
}
tempFile := idx.path + ".tmp"
if err := os.WriteFile(tempFile, data, 0o600); err != nil {
logger.Warn(os.Remove(tempFile))
return fmt.Errorf("failed to write temporary index file: %w", err)
}
if err := os.Rename(tempFile, idx.path); err != nil {
logger.Warn(os.Remove(tempFile))
return fmt.Errorf("failed to replace index file: %w", err)
}
return nil
}
func (idx *index) get(k key) (*CacheEntry, bool) {
e, ok := idx.entries[k]
return e, ok
}
func (idx *index) put(k key, entry *CacheEntry) {
idx.entries[k] = entry
}
func (idx *index) delete(k key) {
delete(idx.entries, k)
}
func (idx *index) exists(k key) bool {
_, ok := idx.entries[k]
return ok
}
func (idx *index) all() map[key]*CacheEntry {
return idx.entries
}
package cache
import (
"fmt"
"time"
)
type PolicyConfig struct {
MaxSize int64 `json:"max_size"`
TTL time.Duration `json:"ttl"`
}
type Policy struct {
Metadata PolicyConfig `json:"metadata"`
Artifact PolicyConfig `json:"artifact"`
}
func (p *Policy) ConfigFor(kind EntryKind) PolicyConfig {
switch kind {
case KindMetadata:
return p.Metadata
case KindArtifact:
return p.Artifact
default:
return p.Artifact
}
}
func (p *Policy) Validate() error {
if err := p.Metadata.validate("metadata"); err != nil {
return err
}
return p.Artifact.validate("artifact")
}
func (c *PolicyConfig) validate(name string) error {
if c.MaxSize <= 0 {
return fmt.Errorf(
"invalid %s policy: max_size must be positive, got %d",
name, c.MaxSize,
)
}
if c.TTL <= 0 {
return fmt.Errorf(
"invalid %s policy: ttl must be positive, got %s",
name, c.TTL,
)
}
return nil
}
package cache
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
type store struct {
dir string
}
func newStore(dir string) *store {
return &store{dir: dir}
}
func (s *store) Write(contentHash, filename string, data []byte) error {
filename = sanitizeFilename(filename, contentHash)
dir := filepath.Join(s.dir, contentHash)
if err := os.MkdirAll(dir, 0o700); err != nil {
return fmt.Errorf("failed to create blob directory: %w", err)
}
filePath := filepath.Join(dir, filename)
if !containedUnder(dir, filePath) {
return fmt.Errorf("filename %q escapes cache directory", filename)
}
if err := os.WriteFile(filePath, data, 0o600); err != nil {
return fmt.Errorf("failed to write blob: %w", err)
}
return nil
}
// Read opens the blob and returns the file handle. Caller must close it.
func (s *store) Read(contentHash, filename string) (*os.File, error) {
p := filepath.Join(s.dir, contentHash, filename)
f, err := os.Open(p)
if err != nil {
return nil, fmt.Errorf("failed to open blob: %w", err)
}
return f, nil
}
// ReadBytes reads the blob and returns its contents as bytes.
func (s *store) ReadBytes(contentHash, filename string) ([]byte, error) {
p := filepath.Join(s.dir, contentHash, filename)
data, err := os.ReadFile(p)
if err != nil {
return nil, fmt.Errorf("failed to read blob: %w", err)
}
return data, nil
}
func (s *store) Remove(contentHash string) error {
p := filepath.Join(s.dir, contentHash)
if err := os.RemoveAll(p); err != nil {
return fmt.Errorf("failed to remove blob: %w", err)
}
return nil
}
// Ingest moves a file from srcPath into the content-addressed store.
// Tries os.Rename for atomic same-filesystem moves, falls back to copy+delete.
func (s *store) Ingest(contentHash, filename, srcPath string) error {
filename = sanitizeFilename(filename, contentHash)
dir := filepath.Join(s.dir, contentHash)
if err := os.MkdirAll(dir, 0o700); err != nil {
return fmt.Errorf("failed to create blob directory: %w", err)
}
destPath := filepath.Join(dir, filename)
if !containedUnder(dir, destPath) {
return fmt.Errorf("filename %q escapes cache directory", filename)
}
if err := os.Rename(srcPath, destPath); err == nil {
return nil
}
src, err := os.Open(srcPath)
if err != nil {
return fmt.Errorf("failed to open source for ingestion: %w", err)
}
defer src.Close()
dst, err := os.OpenFile(destPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600)
if err != nil {
return fmt.Errorf("failed to create destination blob: %w", err)
}
if _, err := io.Copy(dst, src); err != nil {
dst.Close()
os.Remove(destPath)
return fmt.Errorf("failed to copy blob during ingestion: %w", err)
}
if err := dst.Close(); err != nil {
os.Remove(destPath)
return fmt.Errorf("failed to finalize blob: %w", err)
}
os.Remove(srcPath)
return nil
}
// sanitizeFilename prevents path traversal by stripping directory components.
func sanitizeFilename(name, fallback string) string {
name = filepath.Base(name)
if name == "." || name == "/" || name == string(filepath.Separator) {
return fallback
}
return name
}
// containedUnder validates child is strictly inside parent (prevents path traversal).
func containedUnder(parent, child string) bool {
absParent, err := filepath.Abs(parent)
if err != nil {
return false
}
absChild, err := filepath.Abs(child)
if err != nil {
return false
}
return strings.HasPrefix(absChild, absParent+string(filepath.Separator))
}
package cmd
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/mclucy/lucy/install"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/state"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/spf13/cobra"
)
const (
flagForceName = "force"
flagWithOptionalName = "with-optional"
flagNoOptionalName = "no-optional"
)
var addCmd = &cobra.Command{
Use: "add",
Short: "Add packages under explicit operator control",
Args: cobra.MinimumNArgs(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return CompletePackageIDSuggestions(context.Background(), "add", toComplete)
},
PreRunE: func(cmd *cobra.Command, args []string) error {
withOptional, _ := cmd.Flags().GetBool(flagWithOptionalName)
noOptional, _ := cmd.Flags().GetBool(flagNoOptionalName)
if withOptional && noOptional {
return fmt.Errorf("--with-optional and --no-optional cannot be used together")
}
return nil
},
RunE: runWithErrorLogging(actionAdd),
}
func init() {
addCmd.Flags().BoolP(flagForceName, "f", false, "Ignore version, dependency, and platform warnings")
addCmd.Flags().Bool(flagWithOptionalName, false, "Also install optional upstream dependencies")
addCmd.Flags().Bool(flagNoOptionalName, false, "Skip optional upstream dependencies (default)")
addNoStyleFlag(addCmd)
rootCmd.AddCommand(addCmd)
}
func actionAdd(cmd *cobra.Command, args []string) error {
workDir, err := os.Getwd()
if err != nil {
return fmt.Errorf("could not determine working directory: %w", err)
}
stateSvc := state.NewProjectStateService(workDir)
hasLucyState, err := lucyStateDirExists(workDir)
if err != nil {
return err
}
if hasLucyState {
if err := stateSvc.Load(cmd.Context()); err != nil {
return fmt.Errorf("load lucy state: %w", err)
}
logger.ShowInfo(formatStateSummary(stateSvc))
}
withOptional, _ := cmd.Flags().GetBool(flagWithOptionalName)
options := install.DefaultOptions()
options.WithOptional = withOptional
ids := make([]types.PackageId, 0, len(args))
for _, arg := range args {
id, err := syntax.Parse(arg)
if err != nil {
logger.Fatal(err)
}
ids = append(ids, id)
}
var result *install.Result
if len(ids) > 1 {
result, err = install.InstallMany(ids, types.SourceAuto, options)
} else {
id := ids[0]
if id.Version == types.VersionAny {
id.Version = types.VersionCompatible
}
result, err = install.Install(id, types.SourceAuto, options)
}
if err != nil {
return err
}
if !hasLucyState {
return nil
}
if err := updateAddState(workDir, stateSvc, ids, result); err != nil {
return fmt.Errorf("update state: %w", err)
}
return nil
}
func lucyStateDirExists(workDir string) (bool, error) {
info, err := os.Stat(filepath.Join(workDir, ".lucy"))
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, fmt.Errorf("stat .lucy directory: %w", err)
}
return info.IsDir(), nil
}
func formatStateSummary(stateSvc *state.ProjectStateService) string {
status := []string{
presenceLabel("config", stateSvc.Config() != nil),
presenceLabel("manifest", stateSvc.Manifest() != nil),
presenceLabel("lock", stateSvc.Lock() != nil),
}
return "Lucy state: " + strings.Join(status, ", ")
}
func presenceLabel(name string, present bool) string {
if present {
return name + " present"
}
return name + " absent"
}
func updateAddState(workDir string, stateSvc *state.ProjectStateService, ids []types.PackageId, result *install.Result) error {
if stateSvc == nil {
return nil
}
manifestIntent := buildUpdatedManifest(stateSvc.Manifest(), ids)
if result == nil || len(result.Installed) == 0 {
return state.WriteManifest(workDir, manifestIntent)
}
lock := buildUpdatedLock(workDir, manifestIntent, stateSvc.Lock(), result)
manifest := state.UpdateManifestRolesForAdd(stateSvc.Manifest(), ids, lock)
if err := state.WriteManifest(workDir, manifest); err != nil {
return err
}
lock = buildUpdatedLock(workDir, manifest, stateSvc.Lock(), result)
lock = state.PruneLockForManifest(lock, manifest)
return state.WriteLock(workDir, lock)
}
func buildUpdatedManifest(existing *state.Manifest, ids []types.PackageId) *state.Manifest {
manifest := existing
for _, id := range ids {
manifest = state.UpsertManifestRequiredIntent(manifest, id, types.SourceAuto.String())
}
return manifest
}
func buildUpdatedLock(workDir string, manifest *state.Manifest, existing *state.Lock, result *install.Result) *state.Lock {
var lock state.Lock
if existing != nil {
lock = *existing
lock.Bundles = append([]state.LockedBundle(nil), existing.Bundles...)
lock.Packages = append([]state.LockedPackage(nil), existing.Packages...)
} else {
lock = state.NewLock()
}
runtime := probe.ServerInfo().Runtime
lock.GeneratedAt = state.NewLock().GeneratedAt
lock.ManifestFingerprint = manifestFingerprint(manifest, lock.ManifestFingerprint)
lock.GameVersion = manifestGameVersion(manifest, runtime, lock.GameVersion)
lock.Platform = manifestPlatform(manifest, runtime, lock.Platform)
lock.PlatformVersion = manifestPlatformVersion(manifest, runtime, lock.PlatformVersion)
packagesByID := make(map[string]state.LockedPackage, len(lock.Packages)+len(result.Installed))
for _, pkg := range lock.Packages {
packagesByID[pkg.ID] = pkg
}
for _, pkg := range result.Installed {
locked := lockedPackageFromInstalled(workDir, pkg, result.Provenance[pkg.Id.StringPlatformName()])
packagesByID[locked.ID] = locked
}
packages := make([]state.LockedPackage, 0, len(packagesByID))
for _, pkg := range packagesByID {
packages = append(packages, pkg)
}
lock.Packages = state.CanonicalLockedPackages(packages)
return &lock
}
func manifestFingerprint(manifest *state.Manifest, fallback string) string {
if manifest != nil {
data, err := state.SerializeManifest(manifest)
if err == nil {
sum := sha256.Sum256(data)
return "sha256:" + hex.EncodeToString(sum[:])
}
}
if fallback != "" {
return fallback
}
return "sha256:absent"
}
func manifestGameVersion(manifest *state.Manifest, runtime *types.RuntimeInfo, fallback string) string {
if manifest != nil && manifest.Environment.GameVersion != "" {
return manifest.Environment.GameVersion
}
if runtime != nil {
if version := runtime.GameVersion.String(); version != "" {
return version
}
}
if fallback != "" {
return fallback
}
return types.VersionUnknown.String()
}
func manifestPlatform(manifest *state.Manifest, runtime *types.RuntimeInfo, fallback string) string {
if manifest != nil && manifest.Environment.ModdingPlatform != "" {
return manifest.Environment.ModdingPlatform
}
if runtime != nil {
if platform := runtime.DerivedModLoader().String(); platform != "" {
return platform
}
}
if fallback != "" {
return fallback
}
return string(types.PlatformNone)
}
func manifestPlatformVersion(manifest *state.Manifest, runtime *types.RuntimeInfo, fallback string) string {
if manifest != nil && manifest.Environment.ModdingPlatformVersion != "" {
return manifest.Environment.ModdingPlatformVersion
}
if runtime != nil {
if version := runtime.DerivedLoaderVersion(); version != "" {
return version
}
}
if fallback != "" {
return fallback
}
return types.VersionUnknown.String()
}
func lockedPackageFromInstalled(workDir string, pkg types.Package, provenance []string) state.LockedPackage {
requester := "root"
if len(provenance) > 0 {
requester = provenance[len(provenance)-1]
}
installPath := ""
filename := ""
if pkg.Local != nil {
filename = filepath.Base(pkg.Local.Path)
installPath = relativeInstallPath(workDir, pkg.Local.Path)
}
source := "direct"
url := ""
hash := "unknown"
hashAlgorithm := "sha1"
if pkg.Remote != nil {
if src := pkg.Remote.Source.String(); src != "unknown" {
source = src
}
url = pkg.Remote.FileUrl
if pkg.Remote.Filename != "" {
filename = pkg.Remote.Filename
}
if pkg.Remote.Hash != "" {
hash = pkg.Remote.Hash
}
if pkg.Remote.HashAlgorithm != "" {
hashAlgorithm = pkg.Remote.HashAlgorithm
}
}
return state.LockedPackage{
ID: pkg.Id.StringPlatformName(),
Version: pkg.Id.Version.String(),
Source: source,
URL: url,
Filename: filename,
Hash: hash,
HashAlgorithm: hashAlgorithm,
InstallPath: installPath,
Side: string(state.SideBoth),
Provenance: normalizedProvenance(provenance),
Requester: requester,
}
}
func relativeInstallPath(workDir, installPath string) string {
if installPath == "" {
return ""
}
if rel, err := filepath.Rel(workDir, installPath); err == nil {
return filepath.ToSlash(rel)
}
return filepath.ToSlash(installPath)
}
func normalizedProvenance(provenance []string) []string {
if len(provenance) == 0 {
return []string{"root"}
}
return append([]string(nil), provenance...)
}
package cmd
import (
"fmt"
"sort"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/slugmap"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/tui"
"github.com/spf13/cobra"
)
var cacheCmd = &cobra.Command{
Use: "cache",
Short: "Manage the download cache",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
var cacheLsCmd = &cobra.Command{
Use: "ls",
Aliases: []string{"list"},
Short: "List cached entries",
RunE: runWithErrorLogging(actionCacheLs),
}
var cacheClearCmd = &cobra.Command{
Use: "clear",
Aliases: []string{"rm"},
Short: "Clear all cached downloads",
RunE: runWithErrorLogging(actionCacheClear),
}
var cacheSlugsCmd = &cobra.Command{
Use: "slugs",
Aliases: []string{"slug"},
Short: "Manage the local slug resolution cache",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
var cacheSlugsLsCmd = &cobra.Command{
Use: "ls",
Aliases: []string{"list"},
Short: "List slug mappings",
RunE: runWithErrorLogging(actionCacheSlugsLs),
}
var cacheSlugsClearCmd = &cobra.Command{
Use: "clear",
Aliases: []string{"rm"},
Short: "Clear all slug mappings",
RunE: runWithErrorLogging(actionCacheSlugsClear),
}
func init() {
addJsonFlag(cacheLsCmd)
addNoStyleFlag(cacheLsCmd)
addNoStyleFlag(cacheClearCmd)
addJsonFlag(cacheSlugsLsCmd)
addNoStyleFlag(cacheSlugsLsCmd)
addNoStyleFlag(cacheSlugsClearCmd)
cacheCmd.AddCommand(cacheLsCmd, cacheClearCmd, cacheSlugsCmd)
cacheSlugsCmd.AddCommand(cacheSlugsLsCmd, cacheSlugsClearCmd)
rootCmd.AddCommand(cacheCmd)
}
func actionCacheLs(cmd *cobra.Command, _ []string) error {
entries := cache.Network().All()
jsonOutput, _ := cmd.Flags().GetBool(flagJsonName)
if jsonOutput {
tools.PrintAsJson(entries)
return nil
}
if len(entries) == 0 {
logger.ShowInfo("Cache is empty")
return nil
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].CreatedAt.After(entries[j].CreatedAt)
})
out := &tui.Data{
Fields: []tui.Field{
&tui.FieldAnnotation{
Annotation: fmt.Sprintf("(%d entries)", len(entries)),
},
},
}
for _, entry := range entries {
out.Fields = append(out.Fields, &tui.FieldAnnotatedShortText{
Title: entry.Key,
Text: fmt.Sprintf(
"%s %s",
entry.Kind,
tools.FormatBytesBinary(entry.Size),
),
Annotation: tools.FormatDuration(entry.Expiration),
})
}
tui.Flush(out)
return nil
}
func actionCacheClear(_ *cobra.Command, _ []string) error {
report, err := cache.Network().ClearAll()
if err != nil {
return fmt.Errorf("failed to clear cache: %w", err)
}
logger.ShowInfo("all cache items cleared")
logger.ShowInfo(
fmt.Sprintf(
"removed %d files, freed up %s of space",
report.FileCount,
tools.FormatBytesBinary(report.TotalFreedSize),
),
)
return nil
}
func actionCacheSlugsLs(cmd *cobra.Command, _ []string) error {
entries := slugmap.Default().All()
jsonOutput, _ := cmd.Flags().GetBool(flagJsonName)
if jsonOutput {
tools.PrintAsJson(entries)
return nil
}
if len(entries) == 0 {
logger.ShowInfo("Slug map is empty")
return nil
}
out := &tui.Data{
Fields: []tui.Field{
&tui.FieldAnnotation{
Annotation: fmt.Sprintf("(%d entries)", len(entries)),
},
},
}
for _, entry := range entries {
shortHash := entry.FileHash
if len(shortHash) > 12 {
shortHash = shortHash[:12]
}
out.Fields = append(out.Fields, &tui.FieldAnnotatedShortText{
Title: entry.Source.String() + "/" + entry.LocalId,
Text: entry.CanonicalSlug,
Annotation: shortHash,
})
}
tui.Flush(out)
return nil
}
func actionCacheSlugsClear(_ *cobra.Command, _ []string) error {
slugmap.Default().Clear()
logger.ShowInfo("slug map cleared")
return nil
}
package cmd
import (
"errors"
"github.com/mclucy/lucy/types"
"github.com/spf13/cobra"
)
const (
flagJsonName = "json"
flagLongName = "long"
flagNoStyleName = "no-style"
flagSourceName = "source"
flagLogFileName = "log-file"
flagPrintLogsName = "print-logs"
flagDebugName = "debug"
flagDumpLogsName = "dump-logs"
)
// addJsonFlag adds the --json flag to a command.
func addJsonFlag(cmd *cobra.Command) {
cmd.Flags().Bool(flagJsonName, false, "Print raw JSON response")
}
// addLongFlag adds the --long/-l flag to a command.
func addLongFlag(cmd *cobra.Command) {
cmd.Flags().BoolP(flagLongName, "l", false, "Show hidden or collapsed output")
}
// addNoStyleFlag adds the --no-style flag to a command (local, not persistent).
func addNoStyleFlag(cmd *cobra.Command) {
cmd.Flags().Bool(flagNoStyleName, false, "Disable colored and styled output")
}
// addSourceFlag adds the --source/-s flag to a command.
// Validation of the source value is done in PreRunE of each command.
func addSourceFlag(cmd *cobra.Command) {
cmd.Flags().StringP(flagSourceName, "s", "", "To fetch info from SOURCE")
}
// validateSourceFlag validates the --source flag value.
// Returns an error if the source is not recognized.
func validateSourceFlag(cmd *cobra.Command) error {
source, _ := cmd.Flags().GetString(flagSourceName)
if source != "" && types.ParseSource(source) == types.SourceUnknown {
return errors.New("unknown source " + source)
}
return nil
}
package cmd
import (
"strings"
"github.com/mclucy/lucy/types"
)
// CompletionCandidate holds a value and optional description for shell completion.
type CompletionCandidate struct {
Value string
Description string
}
// FilterByPrefix returns candidates whose Value starts with prefix (case-insensitive).
func FilterByPrefix(candidates []CompletionCandidate, prefix string) []CompletionCandidate {
if prefix == "" {
return candidates
}
lower := strings.ToLower(prefix)
var out []CompletionCandidate
for _, c := range candidates {
if strings.HasPrefix(strings.ToLower(c.Value), lower) {
out = append(out, c)
}
}
return out
}
// ToCobraCompletions converts CompletionCandidate slice to cobra's "value\tDescription" format.
func ToCobraCompletions(candidates []CompletionCandidate) []string {
out := make([]string, 0, len(candidates))
for _, c := range candidates {
if c.Description != "" {
out = append(out, c.Value+"\t"+c.Description)
} else {
out = append(out, c.Value)
}
}
return out
}
// StaticPlatformCandidates returns completion candidates for all user-facing platforms.
func StaticPlatformCandidates() []CompletionCandidate {
return []CompletionCandidate{
{Value: types.PlatformMinecraft.String(), Description: "Vanilla / Bukkit / Paper plugins"},
{Value: types.PlatformFabric.String(), Description: "Fabric mods"},
{Value: types.PlatformForge.String(), Description: "Forge mods"},
{Value: types.PlatformNeoforge.String(), Description: "NeoForge mods"},
{Value: types.PlatformMCDR.String(), Description: "MCDR controller / plugin framework"},
}
}
// StaticSearchPlatformCandidates returns completion candidates for search-enabled platforms (rollout set).
func StaticSearchPlatformCandidates() []CompletionCandidate {
return []CompletionCandidate{
{Value: types.PlatformFabric.String(), Description: "Fabric mods"},
{Value: types.PlatformForge.String(), Description: "Forge mods"},
{Value: types.PlatformNeoforge.String(), Description: "NeoForge mods"},
{Value: "bukkit", Description: "Bukkit/Paper/Spigot plugins"},
}
}
// StaticVersionCandidates returns completion candidates for fuzzy version hints.
func StaticVersionCandidates() []CompletionCandidate {
return []CompletionCandidate{
{Value: types.VersionCompatible.String(), Description: "Newest version that appears to fit the environment"},
{Value: "latest", Description: "Request the newest available version"},
}
}
// StaticSourceCandidates returns completion candidates for concrete upstream sources.
func StaticSourceCandidates() []CompletionCandidate {
return []CompletionCandidate{
{Value: "curseforge", Description: "CurseForge source"},
{Value: types.SourceModrinth.String(), Description: "Modrinth source"},
{Value: types.SourceGitHub.String(), Description: "GitHub Releases"},
{Value: types.SourceMCDR.String(), Description: "MCDR Plugin Catalogue"},
}
}
// StaticSortCandidates returns completion candidates for search sort options.
func StaticSortCandidates() []CompletionCandidate {
return []CompletionCandidate{
{Value: string(types.SearchSortRelevance), Description: "Sort by relevance"},
{Value: string(types.SearchSortDownloads), Description: "Sort by download count"},
{Value: string(types.SearchSortNewest), Description: "Sort by newest"},
}
}
// ParseCompletionToken parses a partial "platform/name@version" token for shell completion.
// Returns parsed components and the active segment ("platform", "name", or "version").
//
// Uses manual string splitting instead of syntax.Parse which panics on partial input.
func ParseCompletionToken(token string) (platform, name, version, segment string) {
if before, after, ok := strings.Cut(token, "@"); ok {
version = after
if beforeSlash, afterSlash, hasSlash := strings.Cut(before, "/"); hasSlash {
platform = beforeSlash
name = afterSlash
} else {
name = before
}
segment = "version"
return
}
if before, after, ok := strings.Cut(token, "/"); ok {
platform = before
name = after
segment = "name"
return
}
platform = token
segment = "platform"
return
}
package cmd
import (
"context"
"sort"
"github.com/spf13/cobra"
)
type PackageIDSuggestionContext struct {
Command string
Token string
Platform string
Name string
Version string
Segment string
}
type PackageIDSuggestionProvider interface {
Name() string
Priority() int
SuggestPackageIDs(context.Context, PackageIDSuggestionContext) ([]CompletionCandidate, error)
}
var packageIDSuggestionProviders []PackageIDSuggestionProvider
func RegisterPackageIDSuggestionProvider(provider PackageIDSuggestionProvider) {
if provider == nil {
return
}
packageIDSuggestionProviders = append(packageIDSuggestionProviders, provider)
sort.SliceStable(packageIDSuggestionProviders, func(i, j int) bool {
return packageIDSuggestionProviders[i].Priority() < packageIDSuggestionProviders[j].Priority()
})
}
func CompletePackageIDSuggestions(ctx context.Context, commandName string, token string) ([]string, cobra.ShellCompDirective) {
platform, name, version, segment := ParseCompletionToken(token)
if segment == "" || segment == "platform" {
candidates := FilterByPrefix(StaticPlatformCandidates(), token)
return ToCobraCompletions(candidates), cobra.ShellCompDirectiveNoFileComp
}
if segment == "version" {
candidates := FilterByPrefix(StaticVersionCandidates(), version)
return ToCobraCompletions(candidates), cobra.ShellCompDirectiveNoFileComp
}
request := PackageIDSuggestionContext{
Command: commandName,
Token: token,
Platform: platform,
Name: name,
Version: version,
Segment: segment,
}
candidates := collectPackageIDSuggestionCandidates(ctx, request)
return ToCobraCompletions(candidates), cobra.ShellCompDirectiveNoFileComp
}
func collectPackageIDSuggestionCandidates(
ctx context.Context,
request PackageIDSuggestionContext,
) []CompletionCandidate {
out := make([]CompletionCandidate, 0)
for _, provider := range packageIDSuggestionProviders {
candidates, err := provider.SuggestPackageIDs(ctx, request)
if err != nil {
continue
}
out = append(out, candidates...)
}
return out
}
package cmd
import "github.com/spf13/cobra"
// configCmd is defined but intentionally not registered with rootCmd.
// It is a stub that preserves the current command surface of exactly 6 top-level subcommands.
var configCmd = &cobra.Command{
Use: "config",
Short: "Manage lucy's configurations",
RunE: func(cmd *cobra.Command, args []string) error {
return nil
},
}
package cmd
import (
"context"
"fmt"
"slices"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/tui"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream/routing"
"github.com/spf13/cobra"
)
var infoCmd = &cobra.Command{
Use: "info",
Short: "Display information of a mod or plugin",
Args: cobra.ExactArgs(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) >= 1 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return CompletePackageIDSuggestions(context.Background(), "info", toComplete)
},
PreRunE: func(cmd *cobra.Command, args []string) error {
return validateSourceFlag(cmd)
},
RunE: runWithErrorLogging(actionInfo),
}
func init() {
addSourceFlag(infoCmd)
addJsonFlag(infoCmd)
addLongFlag(infoCmd)
addNoStyleFlag(infoCmd)
_ = infoCmd.RegisterFlagCompletionFunc(flagSourceName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
candidates := FilterByPrefix(StaticSourceCandidates(), toComplete)
return ToCobraCompletions(candidates), cobra.ShellCompDirectiveNoFileComp
})
rootCmd.AddCommand(infoCmd)
}
func actionInfo(cmd *cobra.Command, args []string) error {
id, err := syntax.Parse(args[0])
if err != nil {
logger.Fatal(err)
}
p := id.NewPackage()
sourceArg, _ := cmd.Flags().GetString(flagSourceName)
specifiedSource := types.ParseSource(sourceArg)
var out *tui.Data
providers, err := routing.ResolveProviders(id.Platform, specifiedSource)
if err != nil {
errArg := sourceArg
if specifiedSource == types.SourceAuto {
errArg = id.Platform.String()
}
logger.ReportError(fmt.Errorf("%w: %s", err, errArg))
return err
}
infoResult, providerErrors, err := routing.FirstInfo(providers, id)
for _, providerErr := range providerErrors {
logger.ReportWarn(
fmt.Errorf(
"info on %s failed: %w",
providerErr.Source.Title(),
providerErr.Err,
),
)
}
if err != nil {
logger.Fatal(fmt.Errorf("failed to get information: %w", err))
}
p.Information, p.Remote = &infoResult.Information, &infoResult.Fetch.Remote
long, _ := cmd.Flags().GetBool(flagLongName)
out = infoOutput(&p, long)
jsonOut, _ := cmd.Flags().GetBool(flagJsonName)
if jsonOut {
tools.PrintAsJson(p)
} else {
tui.Flush(out)
}
return nil
}
// TODO: Link to newest version
// TODO: Link to latest compatible version
// TODO: Generate `lucy add` command
func infoOutput(p *types.Package, longOutput bool) *tui.Data {
maxLines := tools.Ternary(
longOutput,
0,
tools.TermHeight()*3/2,
)
useAlternate := !longOutput
o := &tui.Data{
Fields: []tui.Field{
&tui.FieldAnnotation{
Annotation: "(from " + p.Remote.Source.Title() + ")",
},
&tui.FieldShortText{
Title: "Name",
Text: p.Information.Title,
},
&tui.FieldShortText{
Title: "Description",
Text: p.Information.Brief,
},
tools.Ternary[tui.Field](
p.Information.DescriptionIsMarkdown,
&tui.FieldMarkdown{
Title: "Information",
Text: p.Information.Description,
Padding: true,
LineWrap: true,
MaxColumns: min(tools.TermWidth()*8/10, 100),
MaxLines: maxLines,
UseAlternate: useAlternate,
AlternateText: tools.Underline(p.Information.DescriptionUrl),
FoldNotice: "",
},
&tui.FieldLongText{
Title: "Information",
Text: p.Information.Description,
Padding: true,
LineWrap: true,
MaxColumns: tools.TermWidth() * 8 / 10,
MaxLines: maxLines,
UseAlternate: useAlternate,
AlternateText: tools.Underline(p.Information.DescriptionUrl),
},
),
},
}
var authorNames []string
var authorLinks []string
for _, author := range p.Information.Authors {
authorNames = append(authorNames, author.Name)
authorLinks = append(authorLinks, author.Url)
}
o.Fields = append(
o.Fields,
&tui.FieldMultiAnnotatedShortText{
Title: "Authors",
Texts: authorNames,
Annotations: authorLinks,
ShowTotal: false,
},
)
if p.Information != nil {
o.Fields = append(
o.Fields,
&tui.FieldShortText{
Title: "License",
Text: p.Information.License,
},
)
}
for _, url := range p.Information.Urls {
o.Fields = append(
o.Fields, &tui.FieldShortText{
Title: url.Name,
Text: tools.Underline(url.Url),
},
)
}
o.Fields = append(
o.Fields, &tui.FieldAnnotatedShortText{
Title: "Download",
Text: tools.Underline(p.Remote.FileUrl),
Annotation: p.Remote.Filename,
},
)
// TODO: Put current server version on the top
// TODO: Hide snapshot versions, except if the current server is using it
if p.Supports != nil &&
p.Supports.Platforms != nil &&
!slices.Contains(p.Supports.Platforms, types.PlatformMCDR) {
f := &tui.FieldLabels{
Title: "Game Versions",
Labels: []string{},
MaxWidth: 0,
MaxLines: tools.TermHeight() / 2,
}
for _, version := range p.Supports.MinecraftVersions {
f.Labels = append(f.Labels, version.String())
}
o.Fields = append(o.Fields, f)
}
return o
}
package cmd
import (
"fmt"
"os"
lucyinit "github.com/mclucy/lucy/cmd/init"
"github.com/mclucy/lucy/state"
"github.com/spf13/cobra"
)
const (
flagInitYesName = "yes"
flagInitConflictName = "conflict"
flagInitWorkDirName = "work-dir"
flagInitGameVersion = "game-version"
)
var initCmd = &cobra.Command{
Use: "init",
Short: "Take over the current server into Lucy state",
Long: `Initialize Lucy in the current
directory. Creates .lucy/config.toml, .lucy/manifest.toml, and .lucy/lock.json.
Init is optimized for taking over an existing server before it behaves like a
blank-slate scaffold. Lucy reconstructs the current reality first, then draws
its managed boundary around the parts the operator wants it to own. It inspects
the live server first, records a soft manifest intent from those facts, and
writes an exact lockfile for the resolved managed state.
No files are written until you confirm at the final review step. That confirmation
is mandatory before Lucy persists intent. Existing Lucy state is preserved by
default, and takeover-style init will show you what is already on disk as an
advisory hint before you decide what Lucy should manage. Lucy absorbs the
existing server into a managed boundary instead of claiming total ownership of
the directory.
Version hints are best-effort: omit a version to use "compatible" (newest
version that appears to fit the current environment), use @latest to request
the newest available, or keep the inferred runtime version when you want
Lucy to match the current environment.`,
RunE: runWithErrorLogging(actionInit),
}
func init() {
initCmd.Flags().BoolP(flagInitYesName, "y", false, "Non-interactive mode: accept all defaults without prompting")
initCmd.Flags().StringP(flagInitConflictName, "c", "preserve", "Conflict mode for existing files: preserve, abort, overwrite")
initCmd.Flags().String(flagInitWorkDirName, "", "Override working directory (for testing)")
initCmd.Flags().String(flagInitGameVersion, "1.21", "Game version for non-interactive init (e.g., 1.21.4)")
_ = initCmd.Flags().MarkHidden(flagInitWorkDirName)
rootCmd.AddCommand(initCmd)
}
func actionInit(cmd *cobra.Command, _ []string) error {
workDir, err := resolveWorkDir(cmd)
if err != nil {
return err
}
conflictStr, _ := cmd.Flags().GetString(flagInitConflictName)
conflictMode, err := parseConflictMode(conflictStr)
if err != nil {
return err
}
yes, _ := cmd.Flags().GetBool(flagInitYesName)
gameVersion, _ := cmd.Flags().GetString(flagInitGameVersion)
flowState := lucyinit.NewInitFlowState(workDir)
flowState.ConflictResolution = conflictMode
if gameVersion != "" && gameVersion != "1.21" && flowState.GameVersion == "" {
flowState.GameVersion = gameVersion
}
if yes {
return runNonInteractiveInit(workDir, flowState)
}
return runInteractiveInit(workDir, flowState)
}
func resolveWorkDir(cmd *cobra.Command) (string, error) {
override, _ := cmd.Flags().GetString(flagInitWorkDirName)
if override != "" {
return override, nil
}
wd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("could not determine working directory: %w", err)
}
return wd, nil
}
func parseConflictMode(s string) (lucyinit.ConflictMode, error) {
switch s {
case "preserve", "":
return lucyinit.PreserveExisting, nil
case "abort":
return lucyinit.AbortOnConflict, nil
case "overwrite":
return lucyinit.OverwriteAll, nil
default:
return "", fmt.Errorf("unknown conflict mode %q: must be preserve, abort, or overwrite", s)
}
}
func runNonInteractiveInit(workDir string, s *lucyinit.InitFlowState) error {
if s.GameVersion == "" {
s.GameVersion = "1.21"
}
if s.Platform == "" {
s.Platform = "none"
}
if s.Platform == "none" && s.PlatformVersion == "" {
s.PlatformVersion = "none"
}
if !lucyinit.CanProceed(s) {
return fmt.Errorf("cannot proceed: managed roots are required for non-interactive init (run interactively or provide explicit roots)")
}
s.Confirmed = true
return writeInitResult(workDir, s)
}
func runInteractiveInit(workDir string, s *lucyinit.InitFlowState) error {
if err := lucyinit.RunInteractiveInit(s); err != nil {
return fmt.Errorf("init flow: %w", err)
}
if s.Aborted {
fmt.Fprintln(os.Stderr, "Init cancelled.")
return nil
}
if !s.Confirmed {
fmt.Fprintln(os.Stderr, "Init cancelled.")
return nil
}
return writeInitResult(workDir, s)
}
func writeInitResult(workDir string, s *lucyinit.InitFlowState) error {
result, err := lucyinit.BuildResult(s)
if err != nil {
return fmt.Errorf("build init plan: %w", err)
}
if result.ConfigToWrite != nil {
if err := state.WriteConfig(workDir, result.ConfigToWrite); err != nil {
return fmt.Errorf("write config: %w", err)
}
}
if result.ManifestToWrite != nil {
if err := state.WriteManifest(workDir, result.ManifestToWrite); err != nil {
return fmt.Errorf("write manifest: %w", err)
}
}
if result.LockToWrite != nil {
if err := state.WriteLock(workDir, result.LockToWrite); err != nil {
return fmt.Errorf("write lock: %w", err)
}
}
lucyinit.RefreshObservedStateAfterInitWrites(workDir)
printInitSummary(result)
return nil
}
func printInitSummary(result lucyinit.InitFlowResult) {
fmt.Println("\nLucy initialized successfully.")
if len(result.WrittenFiles) > 0 {
fmt.Println("\nFiles written:")
for _, f := range result.WrittenFiles {
fmt.Printf(" %s\n", f)
}
}
if len(result.SkippedFiles) > 0 {
fmt.Println("\nFiles preserved (already exist):")
for _, f := range result.SkippedFiles {
fmt.Printf(" %s\n", f)
}
}
}
package cmd
import (
"fmt"
"os"
"sort"
"github.com/mclucy/lucy/install"
"github.com/mclucy/lucy/state"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/spf13/cobra"
)
type installSyncPlan struct {
Requested []types.PackageId
UsesExactLock bool
Stable bool
}
var installCmd = &cobra.Command{
Use: "install",
Short: "Converge Lucy-managed runtime state from the lockfile",
Args: cobra.NoArgs,
RunE: runWithErrorLogging(actionInstall),
}
func init() {
addNoStyleFlag(installCmd)
rootCmd.AddCommand(installCmd)
}
func actionInstall(cmd *cobra.Command, args []string) error {
workDir, err := os.Getwd()
if err != nil {
return fmt.Errorf("could not determine working directory: %w", err)
}
hasLucyState, err := lucyStateDirExists(workDir)
if err != nil {
return err
}
if !hasLucyState {
return fmt.Errorf("lucy state is not initialized")
}
stateSvc := state.NewProjectStateService(workDir)
if err := stateSvc.Load(cmd.Context()); err != nil {
return fmt.Errorf("load lucy state: %w", err)
}
if stateSvc.Manifest() == nil {
return fmt.Errorf("manifest is required for install")
}
plan, err := buildInstallSyncPlan(stateSvc.Manifest(), stateSvc.Lock(), stateSvc.Config())
if err != nil {
return err
}
if len(plan.Requested) == 0 {
return nil
}
options := install.DefaultOptions()
if cfg := stateSvc.Config(); cfg != nil {
options.WithOptional = cfg.Optional.IncludeOptional
}
result, err := install.InstallMany(plan.Requested, types.SourceAuto, options)
if err != nil {
return err
}
lock := buildUpdatedLock(workDir, stateSvc.Manifest(), stateSvc.Lock(), result)
lock = filteredManagedLock(stateSvc.Config(), stateSvc.Manifest(), lock)
return state.WriteLock(workDir, lock)
}
func buildInstallSyncPlan(manifest *state.Manifest, lock *state.Lock, config *state.Config) (installSyncPlan, error) {
if manifest == nil {
return installSyncPlan{}, fmt.Errorf("manifest is required for install")
}
exact, ok, err := exactSyncPackageIDs(manifest, lock, config)
if err != nil {
return installSyncPlan{}, err
}
if ok {
return installSyncPlan{Requested: exact, UsesExactLock: true, Stable: true}, nil
}
required, err := manifestRequiredPackageIDs(manifest)
if err != nil {
return installSyncPlan{}, err
}
return installSyncPlan{Requested: required, UsesExactLock: false, Stable: false}, nil
}
func exactSyncPackageIDs(manifest *state.Manifest, lock *state.Lock, config *state.Config) ([]types.PackageId, bool, error) {
if manifest == nil || lock == nil || len(lock.Packages) == 0 {
return nil, false, nil
}
if manifestFingerprint(manifest, "") != lock.ManifestFingerprint {
return nil, false, nil
}
filteredLock := filteredManagedLock(config, manifest, lock)
if len(filteredLock.Packages) == 0 {
return nil, false, nil
}
diff := state.DiffDesiredResolved(managedManifest(manifest), filteredLock)
if len(diff.InManifestNotLock) > 0 || len(diff.InLockNotManifest) > 0 {
return nil, false, nil
}
requested := make([]types.PackageId, 0, len(filteredLock.Packages))
for _, pkg := range filteredLock.Packages {
id, err := syntax.Parse(pkg.ID + "@" + pkg.Version)
if err != nil {
return nil, false, fmt.Errorf("parse locked package %s: %w", pkg.ID, err)
}
requested = append(requested, id)
}
sort.Slice(requested, func(i, j int) bool {
if requested[i].StringPlatformName() != requested[j].StringPlatformName() {
return requested[i].StringPlatformName() < requested[j].StringPlatformName()
}
return requested[i].Version.String() < requested[j].Version.String()
})
return requested, true, nil
}
func manifestRequiredPackageIDs(manifest *state.Manifest) ([]types.PackageId, error) {
requested := make([]types.PackageId, 0, len(manifest.Packages))
for _, pkg := range manifest.Packages {
if pkg.Role != state.RoleRequired {
continue
}
id, err := syntax.Parse(pkg.ID + "@" + pkg.Version)
if err != nil {
return nil, fmt.Errorf("parse manifest package %s: %w", pkg.ID, err)
}
requested = append(requested, id)
}
sort.Slice(requested, func(i, j int) bool {
return requested[i].StringPlatformName() < requested[j].StringPlatformName()
})
return requested, nil
}
func managedManifest(manifest *state.Manifest) *state.Manifest {
if manifest == nil {
return nil
}
cloned := *manifest
cloned.Packages = make([]state.ManifestPackage, 0, len(manifest.Packages))
for _, pkg := range manifest.Packages {
if pkg.Role == state.RoleIgnored {
continue
}
cloned.Packages = append(cloned.Packages, pkg)
}
return &cloned
}
func filteredManagedLock(config *state.Config, manifest *state.Manifest, lock *state.Lock) *state.Lock {
if lock == nil {
return nil
}
filtered := *lock
filtered.Packages = make([]state.LockedPackage, 0, len(lock.Packages))
filtered.Bundles = append([]state.LockedBundle(nil), lock.Bundles...)
ignored := make(map[string]struct{})
scope := state.NewManagedScope(nil, nil)
if config != nil {
scope = state.NewManagedScope(config.Scope.ManagedRoots, config.Scope.UnmanagedPaths)
}
if manifest != nil {
for _, pkg := range manifest.Packages {
if pkg.Role == state.RoleIgnored {
ignored[pkg.ID] = struct{}{}
}
}
}
for _, pkg := range lock.Packages {
if _, skip := ignored[pkg.ID]; skip {
continue
}
if !state.IsManaged(scope, pkg.InstallPath) {
continue
}
filtered.Packages = append(filtered.Packages, pkg)
}
filtered.Packages = state.CanonicalLockedPackages(filtered.Packages)
return &filtered
}
package cmd
import (
"context"
"fmt"
"os"
"github.com/mclucy/lucy/state"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/spf13/cobra"
)
var removeCmd = &cobra.Command{
Use: "remove",
Short: "Remove packages under explicit operator control",
Args: cobra.MinimumNArgs(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return CompletePackageIDSuggestions(context.Background(), "remove", toComplete)
},
RunE: runWithErrorLogging(actionRemove),
}
func init() {
addNoStyleFlag(removeCmd)
rootCmd.AddCommand(removeCmd)
}
func actionRemove(cmd *cobra.Command, args []string) error {
workDir, err := os.Getwd()
if err != nil {
return fmt.Errorf("could not determine working directory: %w", err)
}
hasLucyState, err := lucyStateDirExists(workDir)
if err != nil {
return err
}
if !hasLucyState {
return fmt.Errorf("lucy state is not initialized")
}
stateSvc := state.NewProjectStateService(workDir)
if err := stateSvc.Load(cmd.Context()); err != nil {
return fmt.Errorf("load lucy state: %w", err)
}
if stateSvc.Manifest() == nil {
return fmt.Errorf("manifest is required for remove")
}
ids := make([]types.PackageId, 0, len(args))
for _, arg := range args {
id, err := syntax.Parse(arg)
if err != nil {
return err
}
ids = append(ids, id)
}
manifest := state.UpdateManifestRolesForRemove(stateSvc.Manifest(), ids, stateSvc.Lock())
if err := state.WriteManifest(workDir, manifest); err != nil {
return fmt.Errorf("update manifest: %w", err)
}
if stateSvc.Lock() == nil {
return nil
}
lock := state.PruneLockForManifest(stateSvc.Lock(), manifest)
if err := state.WriteLock(workDir, lock); err != nil {
return fmt.Errorf("update lock: %w", err)
}
return nil
}
package cmd
import (
"context"
"errors"
"fmt"
"strconv"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/tui"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream/routing"
"github.com/spf13/cobra"
)
const (
flagIndexName = "index"
flagClientName = "client"
flagPlatformName = "platform"
)
var searchCmd = &cobra.Command{
Use: "search",
Short: "Search for mods and plugins",
Args: cobra.ExactArgs(1),
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if len(args) >= 1 {
return nil, cobra.ShellCompDirectiveNoFileComp
}
return CompletePackageIDSuggestions(context.Background(), "search", toComplete)
},
PreRunE: func(cmd *cobra.Command, args []string) error {
index, _ := cmd.Flags().GetString(flagIndexName)
if !types.SearchSort(index).Valid() {
return errors.New("--index must be one of \"relevance\", \"downloads\", \"newest\"")
}
platform, _ := cmd.Flags().GetString(flagPlatformName)
if platform != "" && !types.Platform(platform).IsSearchPlatform() {
return errors.New("--platform must be one of \"fabric\", \"forge\", \"neoforge\", \"bukkit\"")
}
return validateSourceFlag(cmd)
},
RunE: runWithErrorLogging(actionSearch),
}
func init() {
searchCmd.Flags().StringP(flagIndexName, "i", "relevance", "Index search results by INDEX")
searchCmd.Flags().BoolP(flagClientName, "c", false, "Also show client-only mods in results")
searchCmd.Flags().String(flagPlatformName, "", "Filter results by platform (fabric, forge, neoforge, bukkit)")
addJsonFlag(searchCmd)
addLongFlag(searchCmd)
addNoStyleFlag(searchCmd)
addSourceFlag(searchCmd)
_ = searchCmd.RegisterFlagCompletionFunc(flagSourceName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
candidates := FilterByPrefix(StaticSourceCandidates(), toComplete)
return ToCobraCompletions(candidates), cobra.ShellCompDirectiveNoFileComp
})
_ = searchCmd.RegisterFlagCompletionFunc(flagIndexName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
candidates := FilterByPrefix(StaticSortCandidates(), toComplete)
return ToCobraCompletions(candidates), cobra.ShellCompDirectiveNoFileComp
})
_ = searchCmd.RegisterFlagCompletionFunc(flagPlatformName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
candidates := FilterByPrefix(StaticSearchPlatformCandidates(), toComplete)
return ToCobraCompletions(candidates), cobra.ShellCompDirectiveNoFileComp
})
rootCmd.AddCommand(searchCmd)
}
func actionSearch(cmd *cobra.Command, args []string) error {
p, err := syntax.Parse(args[0])
if err != nil {
logger.Fatal(err)
}
index, _ := cmd.Flags().GetString(flagIndexName)
client, _ := cmd.Flags().GetBool(flagClientName)
long, _ := cmd.Flags().GetBool(flagLongName)
sourceArg, _ := cmd.Flags().GetString(flagSourceName)
platformArg, _ := cmd.Flags().GetString(flagPlatformName)
specifiedSource := types.ParseSource(sourceArg)
resolvedPlatform, err := ResolvePlatform(p.Platform, platformArg)
if err != nil {
logger.Fatal(err)
}
options := types.SearchOptions{
IncludeClient: client,
SortBy: types.SearchSort(index),
FilterPlatform: resolvedPlatform,
}
out := &tui.Data{}
providers, err := routing.ResolveSearchProviders(options.FilterPlatform, specifiedSource)
if err != nil {
errArg := sourceArg
if specifiedSource == types.SourceAuto {
errArg = options.FilterPlatform.String()
}
logger.Fatal(fmt.Errorf("%w: %s", err, errArg))
}
results, errs := routing.SearchMany(providers, p.Name, options)
for _, err := range errs {
providerErr := fmt.Errorf(
"search on %s failed: %w",
err.Source.Title(),
err.Err,
)
if specifiedSource == types.SourceAuto && len(providers) > 1 {
logger.ReportWarn(providerErr)
continue
}
logger.ReportWarn(providerErr)
}
if err := searchResultError(results, errs); err != nil {
return err
}
for _, res := range results {
appendToSearchOutput(out, long, res)
}
tui.Flush(out)
return nil
}
func searchResultError(
results []types.SearchResults,
providerErrors []routing.ProviderError,
) error {
if len(results) > 0 || len(providerErrors) == 0 {
return nil
}
joined := make([]error, 0, len(providerErrors))
for _, providerErr := range providerErrors {
joined = append(joined, providerErr)
}
return errors.Join(joined...)
}
func appendToSearchOutput(
out *tui.Data,
showAll bool,
res types.SearchResults,
) {
var results []string
for _, r := range res.Projects {
results = append(results, r.String())
}
if len(out.Fields) != 0 {
out.Fields = append(
out.Fields, &tui.FieldSeparator{
Length: 0,
Dim: false,
},
)
}
out.Fields = append(
out.Fields,
&tui.FieldAnnotation{
Annotation: "Results from " + res.Source.Title(),
},
)
if res.Source == types.SourceModrinth && len(res.Projects) == 100 {
out.Fields = append(
out.Fields,
&tui.FieldAnnotation{
Annotation: "* only showing the top 100",
},
)
}
out.Fields = append(
out.Fields,
&tui.FieldShortText{
Title: "# ",
Text: strconv.Itoa(len(res.Projects)),
},
&tui.FieldDynamicColumnLabels{
Title: ">>>",
Labels: results,
MaxLines: tools.Ternary(
showAll,
0,
tools.TermHeight()-6,
),
},
)
}
package cmd
import (
"fmt"
"strings"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/tui"
"github.com/mclucy/lucy/types"
"github.com/spf13/cobra"
)
var statusCmd = &cobra.Command{
Use: "status",
Short: "Display basic information of the current server",
RunE: runWithErrorLogging(actionStatus),
}
func init() {
addJsonFlag(statusCmd)
addLongFlag(statusCmd)
rootCmd.AddCommand(statusCmd)
}
func actionStatus(cmd *cobra.Command, args []string) error {
serverInfo := probe.ServerInfo()
json, _ := cmd.Flags().GetBool(flagJsonName)
long, _ := cmd.Flags().GetBool(flagLongName)
noStyle, _ := cmd.Flags().GetBool(flagNoStyleName)
if json {
tools.PrintAsJson(serverInfo)
} else {
tui.Flush(generateStatusOutput(&serverInfo, long, noStyle))
}
return nil
}
func generateStatusOutput(
data *types.ServerInfo,
longOutput bool,
noStyle bool,
) (output *tui.Data) {
packageNameOutput := tools.Ternary(
longOutput,
func(pkg types.Package) string { return pkg.Id.StringFull() },
func(pkg types.Package) string { return pkg.Id.Name.String() },
)
if data.Runtime == nil {
return &tui.Data{
Fields: []tui.Field{
&tui.FieldAnnotation{
Annotation: "(No server found)",
},
},
}
}
output = &tui.Data{Fields: []tui.Field{}}
serverPlatform := data.Runtime.DerivedModLoader()
hasMcdr := data.Environments.Mcdr != nil
hasLucy := data.Environments.Lucy != nil
primaryNode, hasPrimaryNode := topologyPrimaryNodeData(data.Runtime.Topology)
// logo display strategy:
// custom client > mod loader > mcdr > lucy > vanilla
var logoPlatform types.Platform
if serverPlatform == types.PlatformVanilla {
if hasMcdr {
logoPlatform = types.PlatformMCDR
} else if hasLucy {
// logoPlatform =
// lucy is not supposed to be a platform, needs refactor
// also need structural support for all other custom server clients
} else {
logoPlatform = types.PlatformVanilla
}
} else if serverPlatform.IsModding() {
output.Fields = append(
output.Fields,
&tui.FieldLogo{
Platform: logoPlatform,
NoColor: noStyle,
},
)
}
output.Fields = append(
output.Fields,
&tui.FieldAnnotatedShortText{
Title: "Game",
Text: data.Runtime.GameVersion.String(),
Annotation: data.Runtime.PrimaryEntrance,
},
)
if data.Activity != nil {
output.Fields = append(
output.Fields, &tui.FieldAnnotatedShortText{
Title: "Activity",
Text: tools.Ternary(
data.Activity.Active,
"Active",
"Inactive",
),
Annotation: tools.Ternary(
data.Activity.Active,
fmt.Sprintf("PID %d", data.Activity.Pid),
"",
),
},
)
} else {
output.Fields = append(
output.Fields, &tui.FieldShortText{
Title: "Activity",
Text: tools.Dim("(Unknown)"),
},
)
}
// Show modding platform if detected, even if no mods found, to differentiate
// between modded and vanilla servers
if platformLabel := statusRuntimePlatformLabel(
data.Runtime.Topology,
data.Packages,
serverPlatform,
hasPrimaryNode,
primaryNode,
); platformLabel != "" {
output.Fields = append(
output.Fields, &tui.FieldAnnotatedShortText{
Title: "Platform",
Text: platformLabel,
Annotation: data.Runtime.DerivedLoaderVersion(),
},
)
}
if topologyField := statusTopologyField(
data.Runtime.Topology,
hasPrimaryNode,
primaryNode,
); topologyField != nil {
output.Fields = append(output.Fields, topologyField)
}
// If topology is resolved and has meaningful risk, show it.
if riskLevel := statusEffectiveRiskLevel(
data.Runtime.Topology,
hasPrimaryNode,
primaryNode,
); riskLevel > types.RiskNone {
output.Fields = append(
output.Fields, &tui.FieldShortText{
Title: "Risk",
Text: topologyRiskLabel(riskLevel, noStyle),
},
)
}
showMods := false
if data.Runtime.Topology != nil && data.Runtime.Topology.Resolved() {
showMods = data.Runtime.Topology.HasCapability(types.CapabilityFabricMods) ||
data.Runtime.Topology.HasCapability(types.CapabilityForgeMods) ||
data.Runtime.Topology.HasCapability(types.CapabilityNeoforgeMods)
}
// Collect mod/plugin names and paths for later use. This is to avoid
// traversing the package list multiple times, which can be costly when
// there are many packages.
var modNames []string
var modPaths []string
var mcdrPlugins []string
if showMods {
modNames = make([]string, 0, len(data.Packages))
modPaths = make([]string, 0, len(data.Packages))
}
if hasMcdr {
mcdrPlugins = make([]string, 0, len(data.Packages))
}
if showMods || hasMcdr {
for _, p := range data.Packages {
if p.Id.IsIdentityPackage() {
continue
}
packagePlatform := p.Id.Platform
if showMods && packagePlatform == serverPlatform {
modNames = append(modNames, packageNameOutput(p))
if p.Local != nil {
modPaths = append(modPaths, p.Local.Path)
}
}
if hasMcdr && packagePlatform == types.PlatformMCDR {
mcdrPlugins = append(mcdrPlugins, packageNameOutput(p))
}
}
}
// Modding related fields only shown when modding platform detected
if showMods {
modListTitle := tools.Ternary(
noStyle,
"Mods",
"└── Mods",
)
if len(modNames) == 0 {
output.Fields = append(
output.Fields, &tui.FieldShortText{
Title: modListTitle,
Text: tools.Dim("(None)"),
},
)
} else {
output.Fields = append(
output.Fields,
tools.Ternary[tui.Field](
longOutput,
&tui.FieldMultiAnnotatedShortText{
Title: modListTitle,
Texts: modNames,
Annotations: modPaths,
ShowTotal: true,
},
&tui.FieldDynamicColumnLabels{
Title: modListTitle,
Labels: modNames,
MaxLines: 0,
ShowTotal: true,
},
),
)
}
}
showPlugins := false
if data.Runtime.Topology != nil && data.Runtime.Topology.Resolved() {
showPlugins = data.Runtime.Topology.HasCapability(types.CapabilityBukkitPlugins)
}
// List plugins if server can load plugins
if showPlugins {
pluginNames := make([]string, 0, len(data.Packages))
for _, p := range data.Packages {
if p.Id.IsIdentityPackage() {
continue
}
if p.Id.Platform == types.PlatformBukkit {
pluginNames = append(pluginNames, packageNameOutput(p))
}
}
pluginListTitle := tools.Ternary(
noStyle,
"Plugins",
"└── Plugins",
)
if len(pluginNames) == 0 {
output.Fields = append(
output.Fields, &tui.FieldShortText{
Title: pluginListTitle,
Text: tools.Dim("(None)"),
},
)
} else {
output.Fields = append(
output.Fields, &tui.FieldDynamicColumnLabels{
Title: pluginListTitle,
Labels: pluginNames,
MaxLines: 0,
ShowTotal: true,
},
)
}
}
// List MCDR plugins if MCDR environment detected
if hasMcdr {
mcdrPluginListTitle := tools.Ternary(
noStyle,
"MCDR Plugins",
"└── Plugins",
)
// Tell users that MCDR is installed
output.Fields = append(
output.Fields, &tui.FieldShortText{
Title: "MCDR",
Text: "Installed" + tools.Ternary(
noStyle,
"",
tools.Green(" ✓"),
),
},
)
if len(mcdrPlugins) == 0 {
output.Fields = append(
output.Fields, &tui.FieldShortText{
Title: mcdrPluginListTitle,
Text: tools.Dim("(None)"),
},
)
} else {
output.Fields = append(
output.Fields, &tui.FieldDynamicColumnLabels{
Title: mcdrPluginListTitle,
Labels: mcdrPlugins,
MaxLines: 0,
ShowTotal: true,
},
)
}
}
return output
}
func topologyPrimaryNodeData(topology *types.RuntimeTopology) (
types.RuntimeNode,
bool,
) {
if topology == nil || !topology.Resolved() {
return types.RuntimeNode{}, false
}
return topology.PrimaryNodeData()
}
func statusRuntimePlatformLabel(
topology *types.RuntimeTopology,
packages []types.Package,
fallback types.Platform,
hasPrimaryNode bool,
primaryNode types.RuntimeNode,
) string {
label := ""
if hasPrimaryNode {
if primaryNode.Role != types.RuntimeRoleHybrid {
if platform := types.DeclaredModdingPlatformForNode(primaryNode.ID); platform != types.PlatformNone && platform != types.PlatformMinecraft {
label = platform.Title()
}
}
if label == "" {
if nodeLabel := runtimeNodeLabel(primaryNode.ID); nodeLabel != "" && nodeLabel != "Minecraft" {
label = nodeLabel
}
}
}
if label == "" && topology != nil && topology.Resolved() && fallback != types.PlatformMinecraft && fallback != types.PlatformAny {
label = fallback.Title()
}
if label == "" {
return ""
}
if addons := statusPackageAddonLabels(
packages,
primaryNode,
); len(addons) > 0 {
label += " + " + strings.Join(addons, " + ")
}
if extras := runtimeTopologyAddonLabels(
topology,
primaryNode.ID,
); len(extras) > 0 {
label += " + " + strings.Join(extras, " + ")
}
return label
}
func statusPackageAddonLabels(
packages []types.Package,
primaryNode types.RuntimeNode,
) []string {
labels := make([]string, 0, len(packages))
seen := map[string]struct{}{}
for _, pkg := range packages {
label := packageRuntimeLabel(pkg)
if label == "" || label == runtimeNodeLabel(primaryNode.ID) {
continue
}
if _, exists := seen[label]; exists {
continue
}
seen[label] = struct{}{}
labels = append(labels, label)
}
return labels
}
func packageRuntimeLabel(pkg types.Package) string {
switch pkg.Id.Platform {
case types.PlatformFabric:
return "Fabric"
case types.PlatformForge:
return "Forge"
case types.PlatformNeoforge:
return "NeoForge"
case types.PlatformMCDR:
return "MCDR"
case types.Platform("paper"):
return "Paper"
case types.Platform("bukkit"):
return "Bukkit"
case types.Platform("folia"):
return "Folia"
case types.Platform("leaves"):
return "Leaves"
case types.Platform("velocity"):
return "Velocity"
case types.Platform("bungeecord"):
return "BungeeCord"
case types.Platform("waterfall"):
return "Waterfall"
case types.Platform("sponge"):
return "Sponge"
case types.PlatformAny:
switch pkg.Id.Name.String() {
case "connector":
return "Connector"
case "kilt":
return "Kilt"
case "geyser":
return "Geyser"
case "sponge":
return "Sponge"
case "arclight":
return "Arclight"
case "youer":
return "Youer"
}
}
return ""
}
func statusTopologyField(
topology *types.RuntimeTopology,
hasPrimaryNode bool,
primaryNode types.RuntimeNode,
) tui.Field {
if topology == nil {
return nil
}
if !topology.Resolved() {
return &tui.FieldShortText{
Title: "Topology",
Text: tools.Dim("(Unresolved)"),
}
}
if !hasPrimaryNode {
return &tui.FieldShortText{
Title: "Topology",
Text: tools.Dim("(Unknown)"),
}
}
roleLabel := runtimeRoleLabel(primaryNode.Role)
if roleLabel == "Mod loader" || roleLabel == "Plugin core" || roleLabel == "Vanilla" {
return nil
}
if roleLabel == "" {
return nil
}
annotation := runtimeTopologyRelationLabel(topology, primaryNode)
if annotation == "" {
return &tui.FieldShortText{
Title: "Topology",
Text: roleLabel,
}
}
return &tui.FieldAnnotatedShortText{
Title: "Topology",
Text: roleLabel,
Annotation: annotation,
}
}
// statusEffectiveRiskLevel derives a display risk from the primary runtime node and
// its directly connected neighboring nodes. Edges themselves are structural only.
func statusEffectiveRiskLevel(
topology *types.RuntimeTopology,
hasPrimaryNode bool,
primaryNode types.RuntimeNode,
) types.RuntimeRiskLevel {
effective := types.RiskNone
if hasPrimaryNode {
effective = primaryNode.RiskLevel
}
if topology == nil {
return effective
}
for _, edge := range topology.EdgesFrom(topology.PrimaryNode) {
if target, ok := topology.FindNode(edge.To); ok && target.RiskLevel > effective {
effective = target.RiskLevel
}
}
for _, edge := range topology.EdgesTo(topology.PrimaryNode) {
if source, ok := topology.FindNode(edge.From); ok && source.RiskLevel > effective {
effective = source.RiskLevel
}
}
return effective
}
func runtimeTopologyRelationLabel(
topology *types.RuntimeTopology,
primaryNode types.RuntimeNode,
) string {
switch primaryNode.Role {
case types.RuntimeRoleProxy:
if targets := runtimeTopologyTargets(
topology,
primaryNode.ID,
); len(targets) > 0 {
return "proxies to " + strings.Join(targets, ", ")
}
return "proxies to backends"
case types.RuntimeRoleHybrid:
if targets := runtimeTopologyTargets(
topology,
primaryNode.ID,
); len(targets) > 0 {
return "hosts " + strings.Join(targets, ", ")
}
return "hybrid runtime"
case types.RuntimeRoleBridge:
if targets := runtimeTopologyTargets(
topology,
primaryNode.ID,
); len(targets) > 0 {
return "hosts compatibility layer"
}
return "compatibility layer"
case types.RuntimeRoleProtocolBridge:
if targets := runtimeTopologyTargets(
topology,
primaryNode.ID,
); len(targets) > 0 {
return "provides protocol compatibility for " + strings.Join(
targets,
", ",
)
}
return "protocol bridge"
default:
return ""
}
}
func runtimeTopologyTargets(
topology *types.RuntimeTopology,
nodeID types.RuntimeNodeID,
) []string {
if topology == nil {
return nil
}
targets := make([]string, 0, 2)
seen := make(map[string]struct{}, 2)
for _, edge := range topology.EdgesFrom(nodeID) {
switch edge.Verb {
case types.EdgeHosts, types.EdgeProxies:
// keep - these point to meaningful targets
default:
continue
}
if target, ok := topology.FindNode(edge.To); ok {
label := runtimeNodeLabel(target.ID)
if label == "" {
continue
}
if _, exists := seen[label]; exists {
continue
}
seen[label] = struct{}{}
targets = append(targets, label)
}
}
return targets
}
func runtimeTopologyAddonLabels(
topology *types.RuntimeTopology,
primaryNodeID types.RuntimeNodeID,
) []string {
if topology == nil {
return nil
}
labels := make([]string, 0, len(topology.Nodes))
seen := map[string]struct{}{}
for _, node := range topology.Nodes {
if node.ID == primaryNodeID {
continue
}
if node.Role == types.RuntimeRoleModLoader || node.Role == types.RuntimeRoleVanilla {
continue
}
label := runtimeNodeLabel(node.ID)
if label == "" || label == "Vanilla" {
continue
}
if _, exists := seen[label]; exists {
continue
}
seen[label] = struct{}{}
labels = append(labels, label)
}
return labels
}
func runtimeRoleLabel(role types.RuntimeRole) string {
switch role {
case types.RuntimeRoleModLoader:
return "Mod loader"
case types.RuntimeRolePluginCore:
return "Plugin core"
case types.RuntimeRoleHybrid:
return "Hybrid"
case types.RuntimeRoleProxy:
return "Proxy"
case types.RuntimeRoleBridge:
return "Bridge"
case types.RuntimeRoleProtocolBridge:
return "Protocol bridge"
case types.RuntimeRoleVanilla:
return "Vanilla"
default:
return ""
}
}
func runtimeNodeLabel(id types.RuntimeNodeID) string {
switch id {
case probe.RuntimeNodeMinecraft:
return "Vanilla"
case probe.RuntimeNodeFabric:
return "Fabric"
case probe.RuntimeNodeForge:
return "Forge"
case probe.RuntimeNodeNeoforge:
return "NeoForge"
case probe.RuntimeNodeMCDR:
return "MCDR"
case probe.RuntimeNodePaper:
return "Paper"
case probe.RuntimeNodeSpigot:
return "Spigot"
case probe.RuntimeNodeBukkit:
return "Bukkit"
case probe.RuntimeNodeFolia:
return "Folia"
case probe.RuntimeNodeLeaves:
return "Leaves"
case probe.RuntimeNodeSponge:
return "Sponge"
case probe.RuntimeNodeArclight:
return "Arclight"
case probe.RuntimeNodeYouer:
return "Youer"
case probe.RuntimeNodeVelocity:
return "Velocity"
case probe.RuntimeNodeBungeecord:
return "BungeeCord"
case probe.RuntimeNodeWaterfall:
return "Waterfall"
case probe.RuntimeNodeGeyserStandalone:
return "Geyser Standalone"
case probe.RuntimeNodeGeyser:
return "Geyser"
case probe.RuntimeNodeConnector:
return "Connector"
case probe.RuntimeNodeKilt:
return "Kilt"
default:
return tools.Capitalize(
strings.ReplaceAll(
strings.ReplaceAll(
string(id),
"-",
" ",
), "_", " ",
),
)
}
}
func topologyRiskLabel(level types.RuntimeRiskLevel, noStyle bool) string {
switch level {
case types.RiskLow:
return "Low"
case types.RiskMedium:
return "Medium" + tools.Ternary(noStyle, "", " ⚠")
case types.RiskHigh:
return "High" + tools.Ternary(noStyle, "", " ⚠⚠")
case types.RiskCritical:
return "Critical" + tools.Ternary(noStyle, "", " ✗")
default:
return "None"
}
}
package cmd
// CommandContract defines the durable semantic boundary for a user-facing
// command. These contracts are intentionally stricter than the current
// implementation so future work can converge on one meaning per command.
type CommandContract struct {
Name string
Summary string
MutatesManifest bool
MutatesLockfile bool
MutatesRuntime bool
ObservesRuntime bool
ManifestEffect string
LockfileEffect string
RuntimeEffect string
Guardrails []string
}
var (
addContract = CommandContract{
Name: "add",
Summary: "Insert or upgrade required intent, then resolve the resulting closure.",
MutatesManifest: true,
MutatesLockfile: true,
MutatesRuntime: false,
ObservesRuntime: true,
ManifestEffect: "Insert a missing package as required intent or upgrade the existing required intent for the addressed package.",
LockfileEffect: "Resolve the full closure implied by the manifest after the required-intent change and record exact versions, sources, hashes, install paths, and provenance.",
RuntimeEffect: "No direct runtime synchronization contract. Runtime drift may be observed for compatibility checks and warnings, but runtime reconciliation belongs to install.",
Guardrails: []string{
"Must not delete unmanaged content.",
"Must not treat ignored entries as required or transitive.",
"Must not use add as a generic sync/apply command.",
},
}
removeContract = CommandContract{
Name: "remove",
Summary: "Remove required intent, then prune transitive dependencies that are no longer needed.",
MutatesManifest: true,
MutatesLockfile: true,
MutatesRuntime: false,
ObservesRuntime: true,
ManifestEffect: "Remove the addressed package from required intent only; ignored entries remain ignored, and unrelated required roots remain intact.",
LockfileEffect: "Re-resolve the closure after the required-intent removal and prune no longer needed transitive dependencies while keeping packages still reachable from another required root.",
RuntimeEffect: "No direct runtime synchronization contract. Runtime drift may be inspected for warnings, but file deletion/application belongs to install.",
Guardrails: []string{
"Must not remove ignored content.",
"Must not delete still-required packages or still-needed transitives.",
"Must not claim ownership of unmanaged runtime files.",
},
}
installContract = CommandContract{
Name: "install",
Summary: "Synchronize managed runtime state to manifest intent using exact lockfile facts.",
MutatesManifest: false,
MutatesLockfile: true,
MutatesRuntime: true,
ObservesRuntime: true,
ManifestEffect: "None. Install never rewrites desired intent.",
LockfileEffect: "Materialize or refresh the exact resolved closure needed to satisfy the current manifest, then use that lockfile as the source of truth for managed runtime sync.",
RuntimeEffect: "Create, replace, or prune only managed-scope runtime artifacts whose exact state differs from the lockfile. Ignored and unmanaged content are observed boundaries, not deletion targets.",
Guardrails: []string{
"Must not mean delete everything not mentioned in the manifest.",
"Must not mutate ignored entries or unmanaged paths.",
"Must treat probe output as observed state for drift detection, not as manifest intent.",
},
}
)
func CommandContracts() map[string]CommandContract {
return map[string]CommandContract{
addContract.Name: addContract,
removeContract.Name: removeContract,
installContract.Name: installContract,
}
}
package init
import (
"archive/zip"
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
"github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/state"
"github.com/mclucy/lucy/types"
"github.com/pelletier/go-toml"
"gopkg.in/yaml.v3"
)
type DiscoveryConfidence string
const (
ConfidenceHigh DiscoveryConfidence = "high"
ConfidenceMedium DiscoveryConfidence = "medium"
ConfidenceLow DiscoveryConfidence = "low"
ConfidenceNone DiscoveryConfidence = "none"
)
type DiscoveredDefaults struct {
GameVersion string
Platform string
PlatformVersion string
ManagedRoots []string
DetectedPackages []string
PackageClassifications []TakeoverPackageClassification
Confidence DiscoveryConfidence
ExistingLucy ExistingLucyHints
}
// ExistingLucyHints captures pre-existing .lucy state as advisory context.
// Under takeover-first init, these hints may fill observation gaps or explain
// drift, but they must not silently outrank live observed state.
type ExistingLucyHints struct {
GameVersion string
Platform string
PlatformVersion string
ManagedRoots []string
ConfigPresent bool
ManifestPresent bool
LockPresent bool
}
func (h ExistingLucyHints) HasAny() bool {
return h.ConfigPresent || h.ManifestPresent || h.LockPresent || h.GameVersion != "" || h.Platform != "" || h.PlatformVersion != "" || len(h.ManagedRoots) > 0
}
// DiscoverServerDefaults is the takeover aggregator used by the current init
// flow. Contractually, takeover-class init must aggregate current server
// information before it proposes desired intent:
// - discovery-first is only about sequence: discover before asking
// - discovery-led is about behavior: observed facts become the primary input
// to the proposal, and stale .lucy files are demoted to advisory hints
//
// probe.ServerInfoAt(workDir) now provides the primary observed-state layer so
// takeover candidates come from the richer probe/runtime model first. Local
// file/archive heuristics remain fallback-only for gaps the probe could not
// explain. Existing .lucy state is recorded separately and only fills gaps when
// no live observation is available.
func DiscoverServerDefaults(workDir string) DiscoveredDefaults {
defaults := DiscoveredDefaults{Confidence: ConfidenceNone}
applyObservedDefaults(&defaults, workDir, probe.ServerInfoAt(workDir))
if version := discoverGameVersion(workDir); version != "" {
if defaults.GameVersion == "" {
defaults.GameVersion = version
}
defaults.Confidence = maxConfidence(defaults.Confidence, ConfidenceMedium)
}
platform, platformVersion, platformConfidence := discoverPlatform(workDir)
if platform != "" && defaults.Platform == "" {
defaults.Platform = platform
defaults.Confidence = maxConfidence(defaults.Confidence, platformConfidence)
}
if platformVersion != "" && defaults.PlatformVersion == "" {
defaults.PlatformVersion = platformVersion
defaults.Confidence = maxConfidence(defaults.Confidence, platformConfidence)
}
defaults.ManagedRoots = appendUnique(defaults.ManagedRoots, detectManagedRoots(workDir)...)
if len(defaults.ManagedRoots) > 0 && defaults.Confidence == ConfidenceNone {
defaults.Confidence = ConfidenceLow
}
defaults.DetectedPackages = appendUnique(defaults.DetectedPackages, discoverPackages(workDir)...)
if len(defaults.DetectedPackages) > 0 && defaults.Confidence == ConfidenceNone {
defaults.Confidence = ConfidenceLow
}
manifest, manifestExists, manifestErr := state.ReadManifest(workDir)
if manifestErr == nil && manifestExists && manifest != nil {
defaults.ExistingLucy.ManifestPresent = true
defaults.ExistingLucy.GameVersion = strings.TrimSpace(manifest.Environment.GameVersion)
defaults.ExistingLucy.Platform = strings.TrimSpace(manifest.Environment.ModdingPlatform)
defaults.ExistingLucy.PlatformVersion = strings.TrimSpace(manifest.Environment.ModdingPlatformVersion)
}
config, configExists, configErr := state.ReadConfig(workDir)
if configErr == nil && configExists && config != nil {
defaults.ExistingLucy.ConfigPresent = true
defaults.ExistingLucy.ManagedRoots = appendUnique(defaults.ExistingLucy.ManagedRoots, config.Scope.ManagedRoots...)
}
if _, lockExists, lockErr := state.ReadLock(workDir); lockErr == nil && lockExists {
defaults.ExistingLucy.LockPresent = true
}
if defaults.GameVersion == "" {
defaults.GameVersion = defaults.ExistingLucy.GameVersion
}
if defaults.Platform == "" {
defaults.Platform = defaults.ExistingLucy.Platform
}
if defaults.PlatformVersion == "" {
defaults.PlatformVersion = defaults.ExistingLucy.PlatformVersion
}
if len(defaults.ManagedRoots) == 0 {
defaults.ManagedRoots = appendUnique(defaults.ManagedRoots, defaults.ExistingLucy.ManagedRoots...)
}
if defaults.Confidence == ConfidenceNone && defaults.ExistingLucy.HasAny() {
defaults.Confidence = ConfidenceLow
}
return defaults
}
func applyObservedDefaults(defaults *DiscoveredDefaults, workDir string, observed types.ServerInfo) {
if defaults == nil {
return
}
if runtime := observed.Runtime; runtime != nil {
if gameVersion := sanitizeObservedVersion(runtime.GameVersion.String()); gameVersion != "" {
defaults.GameVersion = gameVersion
defaults.Confidence = maxConfidence(defaults.Confidence, ConfidenceHigh)
}
if platform := runtime.DerivedModLoader(); platform.Valid() && platform != types.PlatformMinecraft {
defaults.Platform = string(platform)
defaults.Confidence = maxConfidence(defaults.Confidence, ConfidenceHigh)
defaults.ManagedRoots = appendUnique(defaults.ManagedRoots, defaultManagedRootsForPlatform(string(platform))...)
if identity := runtimeIdentityPackage(string(platform)); identity != "" {
defaults.DetectedPackages = appendUnique(defaults.DetectedPackages, identity)
}
}
if version := sanitizeObservedVersion(runtime.DerivedLoaderVersion()); version != "" {
defaults.PlatformVersion = version
defaults.Confidence = maxConfidence(defaults.Confidence, ConfidenceHigh)
} else if defaults.Platform != "" {
if version := discoverObservedLoaderVersion(workDir, types.Platform(defaults.Platform)); version != "" {
defaults.PlatformVersion = version
defaults.Confidence = maxConfidence(defaults.Confidence, ConfidenceHigh)
}
}
}
defaults.ManagedRoots = appendUnique(defaults.ManagedRoots, managedRootsFromObservedPaths(workDir, observed.ModPath)...)
if observed.Environments.Mcdr != nil && observed.Environments.Mcdr.Config != nil {
defaults.ManagedRoots = appendUnique(defaults.ManagedRoots, managedRootsFromObservedPaths(workDir, observed.Environments.Mcdr.Config.PluginDirectories)...)
}
if len(defaults.ManagedRoots) > 0 {
defaults.Confidence = maxConfidence(defaults.Confidence, ConfidenceHigh)
}
defaults.PackageClassifications = BuildTakeoverPackageClassifications(observed.Packages)
defaults.DetectedPackages = appendUnique(defaults.DetectedPackages, packageCandidatesFromObserved(observed.Packages)...)
for _, classification := range defaults.PackageClassifications {
defaults.DetectedPackages = appendUnique(defaults.DetectedPackages, classification.ID)
}
if len(defaults.DetectedPackages) > 0 || len(defaults.PackageClassifications) > 0 {
defaults.Confidence = maxConfidence(defaults.Confidence, ConfidenceHigh)
}
}
func managedRootsFromObservedPaths(workDir string, paths []string) []string {
roots := make([]string, 0, len(paths))
for _, candidate := range paths {
root := normalizeObservedRoot(workDir, candidate)
if root == "" {
continue
}
roots = append(roots, root)
}
sort.Strings(roots)
return uniqueStrings(roots)
}
func normalizeObservedRoot(workDir, candidate string) string {
candidate = strings.TrimSpace(candidate)
if candidate == "" {
return ""
}
if !filepath.IsAbs(candidate) {
candidate = filepath.Join(workDir, candidate)
}
rel, err := filepath.Rel(workDir, candidate)
if err != nil {
return ""
}
rel = filepath.Clean(rel)
if rel == "." || rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
return ""
}
return filepath.ToSlash(rel)
}
func packageCandidatesFromObserved(packages []types.Package) []string {
candidates := make([]string, 0, len(packages))
for _, pkg := range packages {
if pkg.Id.Platform == types.PlatformAny || pkg.Id.Platform == types.PlatformUnknown || strings.TrimSpace(pkg.Id.Name.String()) == "" {
continue
}
candidates = append(candidates, pkg.Id.StringPlatformName())
}
sort.Strings(candidates)
return uniqueStrings(candidates)
}
func defaultManagedRootsForPlatform(platform string) []string {
switch types.Platform(strings.TrimSpace(platform)) {
case types.PlatformFabric, types.PlatformForge, types.PlatformNeoforge:
return []string{"mods"}
case types.PlatformMCDR:
return []string{"plugins"}
default:
return nil
}
}
func runtimeIdentityPackage(platform string) string {
p := types.Platform(strings.TrimSpace(platform))
if p == types.PlatformNone || !p.Valid() {
return ""
}
return types.PackageId{Platform: p, Name: types.ProjectName(p.String())}.StringPlatformName()
}
func discoverObservedLoaderVersion(workDir string, platform types.Platform) string {
entries, err := os.ReadDir(workDir)
if err != nil {
return ""
}
for _, entry := range entries {
if entry.IsDir() || !strings.HasSuffix(strings.ToLower(entry.Name()), ".jar") {
continue
}
version := discoverLoaderVersion(filepath.Join(workDir, entry.Name()), platform)
if version != "" {
return version
}
}
return ""
}
func sanitizeObservedVersion(value string) string {
value = strings.TrimSpace(value)
switch strings.ToLower(value) {
case "", "none", "unknown":
return ""
default:
return value
}
}
func ApplyDiscoveredDefaults(s *InitFlowState, defaults DiscoveredDefaults) {
if strings.TrimSpace(s.GameVersion) == "" {
s.GameVersion = strings.TrimSpace(defaults.GameVersion)
}
if strings.TrimSpace(s.Platform) == "" {
s.Platform = strings.TrimSpace(defaults.Platform)
}
if strings.TrimSpace(s.PlatformVersion) == "" {
s.PlatformVersion = strings.TrimSpace(defaults.PlatformVersion)
}
if len(s.ManagedRoots) == 0 {
s.ManagedRoots = append([]string(nil), defaults.ManagedRoots...)
}
if len(s.PackageClassifications) == 0 {
s.PackageClassifications = append([]TakeoverPackageClassification(nil), defaults.PackageClassifications...)
}
}
func discoverGameVersion(workDir string) string {
for _, candidate := range []string{"server.properties", "eula.txt"} {
path := filepath.Join(workDir, candidate)
data, err := os.ReadFile(path)
if err != nil {
continue
}
for line := range strings.SplitSeq(string(data), "\n") {
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
key, value, ok := strings.Cut(line, "=")
if !ok {
continue
}
if strings.EqualFold(strings.TrimSpace(key), "version") {
return strings.TrimSpace(value)
}
}
}
return ""
}
func discoverPlatform(workDir string) (string, string, DiscoveryConfidence) {
entries, err := os.ReadDir(workDir)
if err != nil {
return "", "", ConfidenceNone
}
hasLibraries := dirExists(filepath.Join(workDir, "libraries"))
hasRunScript := fileExists(filepath.Join(workDir, "run.sh")) || fileExists(filepath.Join(workDir, "run.bat"))
hasMcdrConfig := fileExists(filepath.Join(workDir, "pyproject.toml")) || dirExists(filepath.Join(workDir, "mcs_config"))
if hasMcdrConfig {
return string(types.PlatformMCDR), "", ConfidenceMedium
}
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := strings.ToLower(entry.Name())
switch {
case strings.HasPrefix(name, "fabric-server") && strings.HasSuffix(name, ".jar"):
return string(types.PlatformFabric), "", ConfidenceHigh
case strings.Contains(name, "fabric") && strings.HasSuffix(name, ".jar"):
return string(types.PlatformFabric), "", ConfidenceMedium
case strings.Contains(name, "neoforge") && strings.HasSuffix(name, ".jar"):
return string(types.PlatformNeoforge), "", ConfidenceHigh
case strings.Contains(name, "forge") && strings.HasSuffix(name, ".jar"):
return string(types.PlatformForge), "", ConfidenceHigh
case name == "server.jar":
return string(types.PlatformNone), "", ConfidenceLow
}
}
if hasLibraries && hasRunScript {
return string(types.PlatformForge), "", ConfidenceMedium
}
if dirExists(filepath.Join(workDir, "mods")) {
return string(types.PlatformFabric), "", ConfidenceLow
}
if dirExists(filepath.Join(workDir, "plugins")) {
return string(types.PlatformNone), "", ConfidenceLow
}
return "", "", ConfidenceNone
}
func discoverLoaderVersion(path string, platform types.Platform) string {
reader, err := zip.OpenReader(path)
if err != nil {
return ""
}
defer reader.Close()
manifest, ok := readArchiveFile(&reader.Reader, "META-INF/MANIFEST.MF")
if !ok {
return ""
}
for line := range strings.SplitSeq(string(manifest), "\n") {
line = strings.TrimSpace(line)
switch platform {
case types.PlatformFabric:
if version := trimVersionFromManifestClasspath(line, "libraries/net/fabricmc/fabric-loader/"); version != "" {
return version
}
case types.PlatformNeoforge:
if version := trimVersionFromManifestClasspath(line, "libraries/net/neoforged/neoforge/"); version != "" {
return version
}
case types.PlatformForge:
if version := trimVersionFromManifestClasspath(line, "libraries/net/minecraftforge/forge/"); version != "" {
return version
}
}
}
return ""
}
func trimVersionFromManifestClasspath(line, marker string) string {
_, rest, ok := strings.Cut(line, marker)
if !ok {
return ""
}
version, _, _ := strings.Cut(rest, "/")
return sanitizeObservedVersion(version)
}
func detectManagedRoots(workDir string) []string {
roots := make([]string, 0, 5)
for _, root := range []string{"mods", "plugins", "config", "datapacks", "resourcepacks", "kubejs"} {
if dirExists(filepath.Join(workDir, root)) {
roots = append(roots, root)
}
}
return roots
}
func discoverPackages(workDir string) []string {
packages := make([]string, 0)
for _, root := range []string{"mods", "plugins"} {
dir := filepath.Join(workDir, root)
entries, err := os.ReadDir(dir)
if err != nil {
continue
}
for _, entry := range entries {
if entry.IsDir() {
continue
}
path := filepath.Join(dir, entry.Name())
packages = appendUnique(packages, detectPackageIDs(path)...)
}
}
sort.Strings(packages)
return packages
}
func detectPackageIDs(path string) []string {
lower := strings.ToLower(path)
switch {
case strings.HasSuffix(lower, ".jar"), strings.HasSuffix(lower, ".pyz"), strings.HasSuffix(lower, ".mcdr"):
return detectArchivePackages(path)
case strings.HasSuffix(lower, ".jar.disabled"):
return detectArchivePackages(path)
default:
return nil
}
}
func detectArchivePackages(path string) []string {
reader, err := zip.OpenReader(path)
if err != nil {
return nil
}
defer reader.Close()
packages := make([]string, 0)
if fabricMeta, ok := readArchiveFile(&reader.Reader, "fabric.mod.json"); ok {
var mod exttype.FileFabricModIdentifier
if json.Unmarshal(fabricMeta, &mod) == nil && strings.TrimSpace(mod.Id) != "" {
packages = append(packages, types.PackageId{Platform: types.PlatformFabric, Name: types.ProjectName(strings.TrimSpace(mod.Id))}.StringPlatformName())
}
}
if neoMeta, ok := readArchiveFile(&reader.Reader, "META-INF/neoforge.mods.toml"); ok {
var mod exttype.FileModLoaderIdentifier
if toml.Unmarshal(neoMeta, &mod) == nil {
for _, item := range mod.Mods {
if strings.TrimSpace(item.ModID) == "" {
continue
}
packages = append(packages, types.PackageId{Platform: types.PlatformNeoforge, Name: types.ProjectName(strings.TrimSpace(item.ModID))}.StringPlatformName())
}
}
}
if forgeMeta, ok := readArchiveFile(&reader.Reader, "META-INF/mods.toml"); ok {
var mod exttype.FileModLoaderIdentifier
if toml.Unmarshal(forgeMeta, &mod) == nil {
for _, item := range mod.Mods {
if strings.TrimSpace(item.ModID) == "" {
continue
}
packages = append(packages, types.PackageId{Platform: types.PlatformForge, Name: types.ProjectName(strings.TrimSpace(item.ModID))}.StringPlatformName())
}
}
}
if oldForgeMeta, ok := readArchiveFile(&reader.Reader, "mcmod.info"); ok {
var mods exttype.FileForgeModIdentifierOld
if json.Unmarshal(oldForgeMeta, &mods) == nil {
for _, item := range mods {
if strings.TrimSpace(item.ModId) == "" {
continue
}
packages = append(packages, types.PackageId{Platform: types.PlatformForge, Name: types.ProjectName(strings.TrimSpace(item.ModId))}.StringPlatformName())
}
}
}
if mcdrMeta, ok := readArchiveFile(&reader.Reader, "mcdreforged.plugin.json"); ok {
var plugin exttype.FileMcdrPluginIdentifier
if json.Unmarshal(mcdrMeta, &plugin) == nil && strings.TrimSpace(plugin.Id) != "" {
packages = append(packages, types.PackageId{Platform: types.PlatformMCDR, Name: types.ProjectName(strings.TrimSpace(plugin.Id))}.StringPlatformName())
}
}
if pluginMeta, ok := readArchiveFile(&reader.Reader, "plugin.yml"); ok {
if id := parsePluginYAMLName(pluginMeta); id != "" {
packages = append(packages, types.PackageId{Platform: types.PlatformNone, Name: types.ProjectName(id)}.StringPlatformName())
}
}
if paperMeta, ok := readArchiveFile(&reader.Reader, "paper-plugin.yml"); ok {
if id := parsePluginYAMLName(paperMeta); id != "" {
packages = append(packages, types.PackageId{Platform: types.PlatformNone, Name: types.ProjectName(id)}.StringPlatformName())
}
}
return uniqueStrings(packages)
}
func readArchiveFile(reader *zip.Reader, name string) ([]byte, bool) {
for _, file := range reader.File {
if file.Name != name {
continue
}
rc, err := file.Open()
if err != nil {
return nil, false
}
defer rc.Close()
data, err := io.ReadAll(rc)
if err != nil {
return nil, false
}
return data, true
}
return nil, false
}
func parsePluginYAMLName(data []byte) string {
var parsed struct {
Name string `yaml:"name"`
}
decoder := yaml.NewDecoder(bytes.NewReader(data))
if err := decoder.Decode(&parsed); err != nil {
return ""
}
return sanitizeProjectName(parsed.Name)
}
func sanitizeProjectName(value string) string {
value = strings.TrimSpace(strings.ToLower(value))
if value == "" {
return ""
}
var b strings.Builder
lastDash := false
for _, r := range value {
switch {
case r >= 'a' && r <= 'z', r >= '0' && r <= '9':
b.WriteRune(r)
lastDash = false
case r == '-', r == '_', r == '.', r == ' ':
if !lastDash {
b.WriteByte('-')
lastDash = true
}
}
}
return strings.Trim(b.String(), "-")
}
func uniqueStrings(values []string) []string {
seen := make(map[string]struct{}, len(values))
result := make([]string, 0, len(values))
for _, value := range values {
value = strings.TrimSpace(value)
if value == "" {
continue
}
if _, exists := seen[value]; exists {
continue
}
seen[value] = struct{}{}
result = append(result, value)
}
return result
}
func appendUnique(values []string, extras ...string) []string {
return uniqueStrings(append(values, extras...))
}
func maxConfidence(left, right DiscoveryConfidence) DiscoveryConfidence {
if confidenceRank(right) > confidenceRank(left) {
return right
}
return left
}
func confidenceRank(value DiscoveryConfidence) int {
switch value {
case ConfidenceHigh:
return 4
case ConfidenceMedium:
return 3
case ConfidenceLow:
return 2
case ConfidenceNone:
return 1
default:
return 0
}
}
func fileExists(path string) bool {
info, err := os.Stat(path)
return err == nil && !info.IsDir()
}
func dirExists(path string) bool {
info, err := os.Stat(path)
return err == nil && info.IsDir()
}
func describeDiscovery(defaults DiscoveredDefaults) string {
parts := make([]string, 0, 5)
if defaults.GameVersion != "" {
parts = append(parts, fmt.Sprintf("game=%s", defaults.GameVersion))
}
if defaults.Platform != "" {
parts = append(parts, fmt.Sprintf("platform=%s", defaults.Platform))
}
if len(defaults.ManagedRoots) > 0 {
parts = append(parts, fmt.Sprintf("roots=%s", strings.Join(defaults.ManagedRoots, ",")))
}
if len(defaults.DetectedPackages) > 0 {
parts = append(parts, fmt.Sprintf("packages=%d", len(defaults.DetectedPackages)))
}
if defaults.ExistingLucy.HasAny() {
parts = append(parts, "existing-.lucy=advisory")
}
if len(parts) == 0 {
return "No server defaults detected"
}
return fmt.Sprintf("Detected defaults (%s confidence): %s", defaults.Confidence, strings.Join(parts, "; "))
}
// Package init defines the UX contract and state machine for the lucy init
// command. It covers the interactive multi-step flow, the non-interactive
// (--yes) fast path, and conflict-resolution semantics for partial .lucy/
// directories.
//
// Init is takeover-first: its optimization target is adopting an existing
// server directory safely, not treating the directory as a mostly blank slate.
// For takeover-class init, Lucy must aggregate current server facts before it
// proposes desired intent. Existing .lucy files remain informative context, but
// they must not silently outrank newer observed reality. Persistent intent
// changes still require explicit operator confirmation at review time.
//
// This file intentionally contains NO huh/bubbletea TUI code. The flow logic is
// pure and testable without a terminal.
package init
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/state"
"github.com/mclucy/lucy/types"
)
// InitOptimizationGoal states what init is trying to optimize for.
type InitOptimizationGoal string
const (
// OptimizationGoalTakeoverExistingServer makes existing-server adoption the
// primary target. Init should prefer reconstructing the current environment
// over inventing a fresh one.
OptimizationGoalTakeoverExistingServer InitOptimizationGoal = "takeover_existing_server"
)
// InitDiscoveryMode distinguishes sequencing from behavior.
type InitDiscoveryMode string
const (
// DiscoveryFirst means discovery happens early in the sequence, but later
// steps may still ignore or overwrite discovered facts.
DiscoveryFirst InitDiscoveryMode = "discovery_first"
// DiscoveryLed means discovery shapes the proposal itself: observed facts are
// the primary input to takeover intent, existing .lucy files are hints unless
// re-confirmed, and the review step must surface any divergence before writes.
DiscoveryLed InitDiscoveryMode = "discovery_led"
)
// InitFactSource identifies which input layer contributed a proposed init fact.
type InitFactSource string
const (
// FactSourceObserved is live filesystem/probe truth from the current server.
FactSourceObserved InitFactSource = "observed"
// FactSourceUserConfirmed is an explicit operator confirmation or override.
// It does not remove the need to observe first; it is the confirmation gate
// before persistent desired state is written.
FactSourceUserConfirmed InitFactSource = "user_confirmed"
// FactSourceExistingLucy is inherited context from pre-existing .lucy files.
// It is informative for takeover, but never silently authoritative.
FactSourceExistingLucy InitFactSource = "existing_lucy"
)
// TakeoverFactPrecedence returns the contract order for takeover-class init
// proposals. Testable rule: live observed state is primary, explicit operator
// confirmation is the approval gate for persisting or overriding, and existing
// .lucy state is the lowest-precedence hint layer.
func TakeoverFactPrecedence() []InitFactSource {
return []InitFactSource{
FactSourceObserved,
FactSourceUserConfirmed,
FactSourceExistingLucy,
}
}
// Constants and types related to the init flow state machine, result construction,
// InitStep names a discrete stage in the init flow.
type InitStep string
const (
// StepWelcome is the opening screen. It explains what lucy init does and
// what files it will create. No input is collected here.
StepWelcome InitStep = "welcome"
// StepGameVersion asks the user for the Minecraft game version (e.g.
// "1.21.4"). This is the only mandatory question for a minimal init.
StepGameVersion InitStep = "game_version"
// StepPlatform asks which primary server runtime to use.
// Valid values: "fabric", "neoforge", "forge", "mcdr", "none"
// "none" means vanilla or an as-yet-unknown platform.
StepPlatform InitStep = "platform"
// StepPlatformVersion asks for the platform loader version. This step is
// skipped when Platform == "none".
StepPlatformVersion InitStep = "platform_version"
// StepSources lets the user configure source priority (modrinth, curseforge,
// github, mcdr). This step is optional and may be skipped in minimal flows.
StepSources InitStep = "sources"
// StepManagedScope lets the user confirm or modify which root directories
// Lucy should manage (e.g. mods/, plugins/, config/). The default list is
// taken from state.ConfigDefaults().Scope.ManagedRoots.
StepManagedScope InitStep = "managed_scope"
// StepPackageClassification lets the user review the detected package graph,
// distinguish leaf packages from graph-only dependencies, and classify them
// into the existing manifest roles without inventing a new persistent role.
StepPackageClassification InitStep = "package_classification"
// StepReview shows the user a complete summary of what will be written
// before any file I/O occurs. Confirmation here sets Confirmed = true.
StepReview InitStep = "review"
// StepDone is the terminal step displayed after files are successfully
// written. No further state changes occur after this step.
StepDone InitStep = "done"
)
// stepOrder is the canonical progression through all steps. NextStep uses this
// to determine the next step after the current one, with optional skipping.
var stepOrder = []InitStep{
StepWelcome,
StepGameVersion,
StepPlatform,
StepPlatformVersion,
StepManagedScope,
StepPackageClassification,
StepReview,
StepDone,
}
// Constants for conflict resolution when pre-existing .lucy/ files are detected.
// ConflictMode determines how init behaves when it detects that one or more
// .lucy/ files already exist.
type ConflictMode string
const (
// PreserveExisting keeps any file that already exists on disk and only
// scaffolds the missing ones. This is the default and makes init
// idempotent: running it twice produces no destructive change.
PreserveExisting ConflictMode = "preserve"
// AbortOnConflict refuses to write anything if ANY target file already
// exists. The user must resolve manually or choose a different mode.
AbortOnConflict ConflictMode = "abort"
// OverwriteAll writes all files regardless of what currently exists on disk.
// Existing content is replaced. The user must explicitly opt into this mode.
OverwriteAll ConflictMode = "overwrite"
)
// Types for representing the flow state and results.
// InitFlowState holds the mutable state accumulated as the user progresses
// through the init flow. It is passed by pointer through every step so that
// both the interactive TUI and the non-interactive fast path share one model.
type InitFlowState struct {
// OptimizationGoal declares the contract this init flow is aiming at.
OptimizationGoal InitOptimizationGoal
// DiscoveryMode documents whether init is only ordered discovery-first or is
// behaviorally discovery-led for takeover.
DiscoveryMode InitDiscoveryMode
// CurrentStep is the step the flow is currently on.
CurrentStep InitStep
// GameVersion is the Minecraft game version the user entered (e.g. "1.21.4").
GameVersion string
// Platform is the chosen server platform identifier.
// Valid values: "fabric", "neoforge", "forge", "mcdr", "none"
Platform string
// PlatformVersion is the chosen loader/platform version.
// Empty when Platform == "none" or when the user skips the step.
PlatformVersion string
// CompatiblePlatforms are extra compatible ecosystems/controller layers that
// can coexist with the primary runtime. Example: neoforge + fabric + sinytra + mcdr.
CompatiblePlatforms []string
// ManagedRoots is the list of relative directory paths Lucy will manage.
// Populated from config defaults on construction; the user may edit it in
// StepManagedScope.
ManagedRoots []string
// PackageClassifications is the in-session takeover graph classification.
// It surfaces all discovered packages, marks whether each package is a leaf
// or a dependency node, and maps operator choices onto the existing manifest
// roles: required, transitive, or ignored.
PackageClassifications []TakeoverPackageClassification
// SourcePriority is the ordered list of package sources.
// Populated from config defaults on construction; the user may reorder in
// StepSources.
SourcePriority []string
// Confirmed is true only after the user explicitly approves the summary at
// StepReview. No file I/O or persistent intent mutation must occur before
// this is true.
Confirmed bool
// Aborted is true if the user cancelled the flow before StepReview +
// Confirmed=true. When true, no files have been written.
Aborted bool
// ExistingFiles lists the .lucy/ state files that were already present on
// disk when NewInitFlowState was called.
ExistingFiles []string
// ExistingStateConflicts lists existing state files that could not be safely
// preserved because they were unreadable or invalid.
ExistingStateConflicts []string
// ConflictResolution controls how init handles the ExistingFiles.
// Default: PreserveExisting.
ConflictResolution ConflictMode
// DiscoveredDefaults stores takeover inputs that init will use to propose a
// starting intent before any file is written. Under the takeover-first
// contract, these defaults should come from live observation first and only
// fall back to existing .lucy hints when observation is missing.
DiscoveredDefaults DiscoveredDefaults
// workDir is the project root checked during construction.
workDir string
}
// NewInitFlowState constructs an InitFlowState for the given working directory.
// It probes the .lucy/ directory for pre-existing files and populates defaults
// from state.ConfigDefaults().
func NewInitFlowState(workDir string) *InitFlowState {
defaults := state.ConfigDefaults()
discovered := DiscoverServerDefaults(workDir)
s := &InitFlowState{
OptimizationGoal: OptimizationGoalTakeoverExistingServer,
DiscoveryMode: DiscoveryLed,
CurrentStep: StepWelcome,
SourcePriority: defaults.Sources.Priority,
ConflictResolution: PreserveExisting,
DiscoveredDefaults: discovered,
workDir: workDir,
}
ApplyDiscoveredDefaults(s, discovered)
if len(s.ManagedRoots) == 0 {
s.ManagedRoots = append([]string(nil), defaults.Scope.ManagedRoots...)
}
// Discover which target state files already exist.
targets := []string{
string(state.ConfigFile),
string(state.ManifestFile),
string(state.LockFile),
}
for _, rel := range targets {
abs := filepath.Join(workDir, rel)
if _, err := os.Stat(abs); err == nil {
s.ExistingFiles = append(s.ExistingFiles, rel)
}
}
if _, exists := containsExistingFile(s.ExistingFiles, string(state.ConfigFile)); exists {
config, _, err := state.ReadConfig(workDir)
if err != nil {
s.ExistingStateConflicts = append(s.ExistingStateConflicts, formatExistingStateConflict(state.ConfigFile, err))
} else if config != nil {
if len(s.ManagedRoots) == 0 && len(config.Scope.ManagedRoots) > 0 {
s.ManagedRoots = append([]string(nil), config.Scope.ManagedRoots...)
}
if len(config.Sources.Priority) > 0 {
s.SourcePriority = append([]string(nil), config.Sources.Priority...)
}
}
}
if _, exists := containsExistingFile(s.ExistingFiles, string(state.ManifestFile)); exists {
manifest, _, err := state.ReadManifest(workDir)
if err != nil {
s.ExistingStateConflicts = append(s.ExistingStateConflicts, formatExistingStateConflict(state.ManifestFile, err))
} else if manifest != nil {
if strings.TrimSpace(s.GameVersion) == "" && strings.TrimSpace(manifest.Environment.GameVersion) != "" {
s.GameVersion = strings.TrimSpace(manifest.Environment.GameVersion)
}
if strings.TrimSpace(s.Platform) == "" && strings.TrimSpace(manifest.Environment.ModdingPlatform) != "" {
s.Platform = strings.TrimSpace(manifest.Environment.ModdingPlatform)
}
if strings.TrimSpace(s.PlatformVersion) == "" && strings.TrimSpace(manifest.Environment.ModdingPlatformVersion) != "" {
s.PlatformVersion = strings.TrimSpace(manifest.Environment.ModdingPlatformVersion)
}
if len(s.CompatiblePlatforms) == 0 && len(manifest.Environment.CompatiblePlatforms) > 0 {
s.CompatiblePlatforms = append([]string(nil), manifest.Environment.CompatiblePlatforms...)
}
}
}
if _, exists := containsExistingFile(s.ExistingFiles, string(state.LockFile)); exists {
if _, _, err := state.ReadLock(workDir); err != nil {
s.ExistingStateConflicts = append(s.ExistingStateConflicts, formatExistingStateConflict(state.LockFile, err))
}
}
return s
}
func containsExistingFile(files []string, want string) (int, bool) {
for i, file := range files {
if file == want {
return i, true
}
}
return -1, false
}
func formatExistingStateConflict(file state.StateFile, err error) string {
return fmt.Sprintf("%s exists but could not be preserved safely: %v", file, err)
}
// RefreshObservedStateAfterInitWrites refreshes probe state for the initialized
// directory so any subsequent takeover/status reads see post-init filesystem
// reality rather than stale memoized observations.
func RefreshObservedStateAfterInitWrites(workDir string) {
probe.RefreshServerInfo(workDir)
}
// Step machine logic
// NextStep returns the step that should follow the current state, applying any
// conditional skips. It does not mutate s; the caller is responsible for
// updating s.CurrentStep.
//
// Rules:
// - StepPlatformVersion is skipped when Platform == "" or Platform == "none".
// - StepDone has no successor; returning StepDone from StepDone is a no-op
// sentinel.
func NextStep(s *InitFlowState) InitStep {
cur := s.CurrentStep
for i, step := range stepOrder {
if step != cur {
continue
}
// Found current step; find the next non-skipped step.
for _, next := range stepOrder[i+1:] {
if shouldSkip(s, next) {
continue
}
return next
}
// No non-skipped step found after current — stay at StepDone.
return StepDone
}
// currentStep not found in order (shouldn't happen); default to done.
return StepDone
}
// shouldSkip reports whether step should be skipped given the current flow
// state.
func shouldSkip(s *InitFlowState, step InitStep) bool {
switch step {
case StepPlatformVersion:
// Skip if no platform was selected or platform is vanilla/none.
return s.Platform == "" || s.Platform == "none"
case StepPackageClassification:
return len(s.PackageClassifications) == 0
}
return false
}
type TakeoverPackageClassification struct {
ID string
Version string
Source string
Role state.ManifestRole
Side state.ManifestSide
Optional bool
Pinned bool
Leaf bool
Requires []string
RequiredBy []string
}
func BuildTakeoverPackageClassifications(packages []types.Package) []TakeoverPackageClassification {
classifications := make(map[string]TakeoverPackageClassification, len(packages))
nameIndex := make(map[string][]string)
for _, pkg := range packages {
id := pkg.Id.StringPlatformName()
if !takeoverPackageIDAllowed(pkg.Id) {
continue
}
classifications[id] = TakeoverPackageClassification{
ID: id,
Version: takeoverManifestVersion(pkg.Id.Version),
Source: takeoverManifestSource(pkg.Remote),
Role: state.RoleTransitive,
Side: state.SideUnknown,
}
name := strings.TrimSpace(pkg.Id.Name.String())
nameIndex[name] = append(nameIndex[name], id)
}
for name := range nameIndex {
sort.Strings(nameIndex[name])
}
for _, pkg := range packages {
fromID := pkg.Id.StringPlatformName()
classification, ok := classifications[fromID]
if !ok {
continue
}
for _, depID := range resolveTakeoverDependencyTargets(pkg, classifications, nameIndex) {
classification.Requires = appendUniqueStrings(classification.Requires, depID)
dep := classifications[depID]
dep.RequiredBy = appendUniqueStrings(dep.RequiredBy, fromID)
classifications[depID] = dep
}
classifications[fromID] = classification
}
result := make([]TakeoverPackageClassification, 0, len(classifications))
for _, classification := range classifications {
sort.Strings(classification.Requires)
sort.Strings(classification.RequiredBy)
classification.Leaf = len(classification.RequiredBy) == 0
if classification.Leaf {
classification.Role = state.RoleRequired
}
result = append(result, classification)
}
sort.Slice(result, func(i, j int) bool {
return result[i].ID < result[j].ID
})
return result
}
func takeoverPackageIDAllowed(id types.PackageId) bool {
if strings.TrimSpace(id.Name.String()) == "" {
return false
}
if !id.Platform.Valid() || id.Platform == types.PlatformAny || id.Platform == types.PlatformUnknown || id.Platform == types.PlatformMinecraft {
return false
}
return true
}
func resolveTakeoverDependencyTargets(pkg types.Package, classifications map[string]TakeoverPackageClassification, nameIndex map[string][]string) []string {
if pkg.Dependencies == nil {
return nil
}
targets := make([]string, 0, len(pkg.Dependencies.Value))
for _, dep := range pkg.Dependencies.Value {
if dep.Embedded {
continue
}
depID := dep.Id.StringPlatformName()
if _, ok := classifications[depID]; ok {
targets = appendUniqueStrings(targets, depID)
continue
}
if dep.Id.Platform == types.PlatformAny || dep.Id.Platform == types.PlatformUnknown {
matches := nameIndex[strings.TrimSpace(dep.Id.Name.String())]
if len(matches) == 1 {
targets = appendUniqueStrings(targets, matches[0])
}
}
}
return targets
}
func takeoverManifestVersion(version types.RawVersion) string {
return state.NormalizeManifestVersionIntent(version)
}
func takeoverManifestSource(remote *types.PackageRemote) string {
if remote == nil {
return "auto"
}
source := strings.TrimSpace(remote.Source.String())
if source == "" || source == "unknown" {
return "auto"
}
return source
}
func appendUniqueStrings(existing []string, values ...string) []string {
seen := make(map[string]struct{}, len(existing))
for _, value := range existing {
seen[value] = struct{}{}
}
for _, value := range values {
value = strings.TrimSpace(value)
if value == "" {
continue
}
if _, ok := seen[value]; ok {
continue
}
existing = append(existing, value)
seen[value] = struct{}{}
}
return existing
}
func applyTakeoverPackageSelections(s *InitFlowState, requiredLeafIDs, ignoredIDs []string) {
requiredSet := make(map[string]struct{}, len(requiredLeafIDs))
ignoredSet := make(map[string]struct{}, len(ignoredIDs))
for _, id := range requiredLeafIDs {
requiredSet[id] = struct{}{}
}
for _, id := range ignoredIDs {
ignoredSet[id] = struct{}{}
}
for i := range s.PackageClassifications {
classification := &s.PackageClassifications[i]
if _, ignored := ignoredSet[classification.ID]; ignored {
classification.Role = state.RoleIgnored
continue
}
if classification.Leaf {
if _, required := requiredSet[classification.ID]; required {
classification.Role = state.RoleRequired
} else {
classification.Role = state.RoleTransitive
}
continue
}
classification.Role = state.RoleTransitive
}
}
// CanProceed reports whether enough information has been collected to write
// valid state files. The minimum required fields are GameVersion and a
// decision on ManagedRoots.
//
// CanProceed does NOT check Confirmed; callers must also verify that before
// performing any I/O. This preserves the takeover contract distinction between
// discovery-led proposal building and explicit user-approved persistence.
func CanProceed(s *InitFlowState) bool {
if s.GameVersion == "" {
return false
}
if err := ValidatePlatformSelection(s.Platform, s.CompatiblePlatforms); err != nil {
return false
}
if len(s.ManagedRoots) == 0 {
return false
}
return true
}
func ValidatePlatformSelection(primary string, compatible []string) error {
return state.ValidateManifestEnvironment(state.ManifestEnvironment{
ModdingPlatform: primary,
CompatiblePlatforms: compatible,
})
}
// Types for the final result of the flow and error conditions during result construction.
type Manifest = state.Manifest
type Lock = state.Lock
// InitFlowResult is returned by BuildResult once the user has confirmed. It
// describes exactly what will be written and what will be preserved.
type InitFlowResult struct {
// ConfigToWrite is the Config value that init will marshal to
// .lucy/config.toml. Nil means the existing file will be preserved
// (ConflictResolution == PreserveExisting and the file was found).
ConfigToWrite *state.Config
// ManifestToWrite is the Manifest that init will marshal to
// .lucy/manifest.json. Nil means preserve existing.
ManifestToWrite *Manifest
// LockToWrite is the empty Lock skeleton that init scaffolds in
// .lucy/lock.json. Nil means preserve existing.
LockToWrite *Lock
// SkippedFiles lists the state-file paths that were preserved because
// ConflictResolution == PreserveExisting and they already existed.
SkippedFiles []string
// WrittenFiles lists the state-file paths that will be (or were) written.
WrittenFiles []string
}
// BuildResult constructs an InitFlowResult from the completed flow state.
// It respects ConflictResolution and returns an error if AbortOnConflict
// would be violated or if CanProceed returns false.
//
// BuildResult does NOT perform any file I/O. It only produces a plan.
// The actual writes are performed by the caller.
func BuildResult(s *InitFlowState) (InitFlowResult, error) {
if !CanProceed(s) {
return InitFlowResult{}, &ErrFlowIncomplete{State: s}
}
if len(s.ExistingStateConflicts) > 0 {
return InitFlowResult{}, &ErrConflict{
Mode: s.ConflictResolution,
ConflictFiles: append([]string(nil), s.ExistingStateConflicts...),
}
}
existingSet := make(map[string]bool, len(s.ExistingFiles))
for _, f := range s.ExistingFiles {
existingSet[f] = true
}
// AbortOnConflict: refuse if any target file already exists.
if s.ConflictResolution == AbortOnConflict && len(s.ExistingFiles) > 0 {
return InitFlowResult{}, &ErrConflict{
Mode: AbortOnConflict,
ConflictFiles: s.ExistingFiles,
}
}
result := InitFlowResult{}
// Helper: decide whether to write a given state file.
willWrite := func(rel string) bool {
if s.ConflictResolution == OverwriteAll {
return true
}
// PreserveExisting: write only if file was NOT found on disk.
return !existingSet[rel]
}
// config.toml
cfgPath := string(state.ConfigFile)
if willWrite(cfgPath) {
cfg := state.ConfigDefaults()
cfg.Scope.ManagedRoots = s.ManagedRoots
cfg.Sources.Priority = s.SourcePriority
result.ConfigToWrite = &cfg
result.WrittenFiles = append(result.WrittenFiles, cfgPath)
} else {
result.SkippedFiles = append(result.SkippedFiles, cfgPath)
}
// manifest.json
mfPath := string(state.ManifestFile)
if willWrite(mfPath) {
mf := state.ManifestDefaults()
mf.Environment.GameVersion = s.GameVersion
mf.Environment.ModdingPlatform = s.Platform
mf.Environment.ModdingPlatformVersion = s.PlatformVersion
mf.Environment.CompatiblePlatforms = append([]string(nil), s.CompatiblePlatforms...)
mf.Packages = state.ManifestPackagesFromClassified(classifiedPackagesForManifest(s.PackageClassifications))
result.ManifestToWrite = &mf
result.WrittenFiles = append(result.WrittenFiles, mfPath)
} else {
result.SkippedFiles = append(result.SkippedFiles, mfPath)
}
// lock.json
lkPath := string(state.LockFile)
if willWrite(lkPath) {
lk := state.NewLock()
populateInitLockMetadata(&lk, s, result.ManifestToWrite)
result.LockToWrite = &lk
result.WrittenFiles = append(result.WrittenFiles, lkPath)
} else {
result.SkippedFiles = append(result.SkippedFiles, lkPath)
}
return result, nil
}
func populateInitLockMetadata(lock *state.Lock, s *InitFlowState, manifest *state.Manifest) {
if lock == nil || s == nil {
return
}
resolvedManifest := manifest
if resolvedManifest == nil {
if existingManifest, _, err := state.ReadManifest(s.workDir); err == nil && existingManifest != nil {
resolvedManifest = existingManifest
}
}
if resolvedManifest != nil {
if data, err := state.SerializeManifest(resolvedManifest); err == nil {
sum := sha256.Sum256(data)
lock.ManifestFingerprint = "sha256:" + hex.EncodeToString(sum[:])
}
lock.GameVersion = strings.TrimSpace(resolvedManifest.Environment.GameVersion)
lock.Platform = strings.TrimSpace(resolvedManifest.Environment.ModdingPlatform)
lock.PlatformVersion = strings.TrimSpace(resolvedManifest.Environment.ModdingPlatformVersion)
}
if lock.ManifestFingerprint == "" {
fallbackManifest := state.ManifestDefaults()
fallbackManifest.Environment.GameVersion = lockMetadataValue(s.GameVersion, s.DiscoveredDefaults.GameVersion)
fallbackManifest.Environment.ModdingPlatform = lockMetadataValue(s.Platform, s.DiscoveredDefaults.Platform)
fallbackManifest.Environment.ModdingPlatformVersion = lockMetadataValue(s.PlatformVersion, s.DiscoveredDefaults.PlatformVersion)
fallbackManifest.Environment.CompatiblePlatforms = append([]string(nil), s.CompatiblePlatforms...)
fallbackManifest.Packages = state.ManifestPackagesFromClassified(classifiedPackagesForManifest(s.PackageClassifications))
if data, err := state.SerializeManifest(&fallbackManifest); err == nil {
sum := sha256.Sum256(data)
lock.ManifestFingerprint = "sha256:" + hex.EncodeToString(sum[:])
}
}
lock.GameVersion = lockMetadataValue(lock.GameVersion, s.GameVersion, s.DiscoveredDefaults.GameVersion, types.VersionUnknown.String())
lock.Platform = lockMetadataValue(lock.Platform, s.Platform, s.DiscoveredDefaults.Platform, string(types.PlatformNone))
lock.PlatformVersion = lockMetadataValue(lock.PlatformVersion, s.PlatformVersion, s.DiscoveredDefaults.PlatformVersion, types.VersionUnknown.String())
}
func lockMetadataValue(values ...string) string {
for _, value := range values {
if trimmed := strings.TrimSpace(value); trimmed != "" {
return trimmed
}
}
return ""
}
func classifiedPackagesForManifest(classifications []TakeoverPackageClassification) []state.ClassifiedPackage {
packages := make([]state.ClassifiedPackage, 0, len(classifications))
for _, classification := range classifications {
packages = append(packages, state.ClassifiedPackage{
ID: classification.ID,
Version: classification.Version,
Source: classification.Source,
Role: classification.Role,
Side: classification.Side,
Optional: classification.Optional,
Pinned: classification.Pinned,
})
}
return packages
}
// Errors for flow validation and conflict detection.
// ErrFlowIncomplete is returned when BuildResult is called on an incomplete
// flow state (CanProceed returns false).
type ErrFlowIncomplete struct {
State *InitFlowState
}
func (e *ErrFlowIncomplete) Error() string {
return "init flow is incomplete: game version and managed roots are required"
}
// ErrConflict is returned when ConflictResolution == AbortOnConflict and one
// or more target files already exist.
type ErrConflict struct {
Mode ConflictMode
ConflictFiles []string
}
func (e *ErrConflict) Error() string {
return "init aborted: one or more .lucy/ files already exist (use --conflict=overwrite to replace or --conflict=preserve to keep them)"
}
package init
import (
"errors"
"fmt"
"strings"
"charm.land/huh/v2"
"github.com/mclucy/lucy/state"
)
// RunInteractiveInit walks the user through the interactive init flow via huh
// forms, populating s in-place. Sets s.Aborted=true on cancellation at any
// step, s.Confirmed=true on final approval. No file I/O occurs here.
func RunInteractiveInit(s *InitFlowState) error {
var continueInit bool
welcomeForm := huh.NewForm(
huh.NewGroup(
huh.NewNote().
Title("Welcome to Lucy").
Description(
"lucy init sets up a new Lucy-managed Minecraft server environment in the\n"+
"current directory. It will create the following files:\n\n"+
" .lucy/config.toml – policy and source defaults\n"+
" .lucy/manifest.toml – soft environment intent (game version, runtime, compatible platforms, mods)\n"+
" .lucy/lock.json – exact resolved facts (versions, hashes, paths, provenance)\n\n"+
"No files will be written until you confirm at the final review step.",
),
huh.NewConfirm().
Title("Continue with setup?").
Affirmative("Yes, let's go").
Negative("Cancel").
Value(&continueInit),
),
)
if err := welcomeForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("welcome step: %w", err)
}
if !continueInit {
s.Aborted = true
return nil
}
s.CurrentStep = StepGameVersion
if len(s.ExistingFiles) > 0 {
conflictDesc := fmt.Sprintf(
"The following Lucy files already exist in this directory:\n\n %s\n\n"+
"How should lucy init handle them?",
strings.Join(s.ExistingFiles, "\n "),
)
if len(s.ExistingStateConflicts) > 0 {
conflictDesc += "\n\nConflicts to resolve before writing:\n\n " + strings.Join(s.ExistingStateConflicts, "\n ")
}
conflictMode := string(s.ConflictResolution)
conflictForm := huh.NewForm(
huh.NewGroup(
huh.NewNote().
Title("Existing Files Detected").
Description(conflictDesc),
huh.NewSelect[string]().
Title("Conflict resolution").
Options(
huh.NewOption("Keep existing files, only scaffold missing ones (recommended)", string(PreserveExisting)),
huh.NewOption("Abort if any file exists – do nothing", string(AbortOnConflict)),
huh.NewOption("Overwrite everything – replace all existing files", string(OverwriteAll)),
).
Value(&conflictMode),
),
)
if err := conflictForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("conflict resolution step: %w", err)
}
s.ConflictResolution = ConflictMode(conflictMode)
if s.ConflictResolution == AbortOnConflict {
s.Aborted = true
fmt.Printf("\nInit aborted: existing files would be overwritten. Use --conflict=overwrite to replace them.\n")
return nil
}
}
if len(s.ExistingStateConflicts) > 0 {
s.Aborted = true
fmt.Printf("\nInit aborted: existing Lucy state has conflicts that must be resolved first.\n %s\n", strings.Join(s.ExistingStateConflicts, "\n "))
return nil
}
gameVersionPlaceholder := "1.21.4"
if s.DiscoveredDefaults.GameVersion != "" {
gameVersionPlaceholder = s.DiscoveredDefaults.GameVersion
}
gameVersionForm := huh.NewForm(
huh.NewGroup(
huh.NewInput().
Title("Minecraft game version").
Description("Enter the Minecraft server version this environment targets (e.g. 1.21.4).").
Placeholder(gameVersionPlaceholder).
Validate(func(v string) error {
v = strings.TrimSpace(v)
if v == "" {
return errors.New("game version is required")
}
return nil
}).
Value(&s.GameVersion),
),
)
if err := gameVersionForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("game version step: %w", err)
}
s.GameVersion = strings.TrimSpace(s.GameVersion)
s.CurrentStep = StepPlatform
platformForm := huh.NewForm(
huh.NewGroup(
huh.NewSelect[string]().
Title("Primary runtime").
Description("Choose the main server runtime Lucy should treat as the primary host environment.").
Options(
huh.NewOption("Fabric – lightweight, fast-updating mod loader", "fabric"),
huh.NewOption("NeoForge – community fork of Forge (recommended for 1.20.2+)", "neoforge"),
huh.NewOption("Forge – original mod loader", "forge"),
huh.NewOption("MCDR – independent controller/plugin framework", "mcdr"),
huh.NewOption("None / Vanilla – no modding platform", "none"),
).
Value(&s.Platform),
),
)
if err := platformForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("platform step: %w", err)
}
s.CurrentStep = StepPlatformVersion
compatibleOptions := state.CompatiblePlatformOptions(s.Platform)
if len(compatibleOptions) > 0 {
selected := append([]string(nil), s.CompatiblePlatforms...)
options := make([]huh.Option[string], 0, len(compatibleOptions))
for _, platform := range compatibleOptions {
label := compatiblePlatformLabel(platform)
options = append(options, huh.NewOption(label, platform))
}
compatibleForm := huh.NewForm(
huh.NewGroup(
huh.NewMultiSelect[string]().
Title("Additional compatible platforms").
Description("Select extra compatibility layers Lucy should record alongside the primary runtime. Only valid combinations for the chosen runtime are shown.").
Options(options...).
Value(&selected),
),
)
if err := compatibleForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("compatible platforms step: %w", err)
}
s.CompatiblePlatforms = selected
} else {
s.CompatiblePlatforms = nil
}
if err := ValidatePlatformSelection(s.Platform, s.CompatiblePlatforms); err != nil {
return fmt.Errorf("platform selection step: %w", err)
}
if s.Platform != "" && s.Platform != "none" {
platformVersionForm := huh.NewForm(
huh.NewGroup(
huh.NewInput().
Title("Platform loader version").
Description(fmt.Sprintf("Enter the %s loader version, or leave blank to use the latest.", s.Platform)).
Placeholder("latest").
Value(&s.PlatformVersion),
),
)
if err := platformVersionForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("platform version step: %w", err)
}
s.PlatformVersion = strings.TrimSpace(s.PlatformVersion)
}
s.CurrentStep = StepManagedScope
allRoots := []string{"mods", "plugins", "config", "datapacks", "resourcepacks"}
managedOpts := make([]huh.Option[string], len(allRoots))
for i, root := range allRoots {
managedOpts[i] = huh.NewOption(root, root)
}
managedRootsForm := huh.NewForm(
huh.NewGroup(
huh.NewMultiSelect[string]().
Title("Managed directories").
Description("Select which directories Lucy should track and manage.").
Options(managedOpts...).
Value(&s.ManagedRoots),
),
)
if err := managedRootsForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("managed scope step: %w", err)
}
if len(s.PackageClassifications) > 0 {
s.CurrentStep = StepPackageClassification
requiredLeafIDs := make([]string, 0, len(s.PackageClassifications))
ignoredIDs := make([]string, 0, len(s.PackageClassifications))
leafOptions := make([]huh.Option[string], 0, len(s.PackageClassifications))
ignoreOptions := make([]huh.Option[string], 0, len(s.PackageClassifications))
for _, classification := range s.PackageClassifications {
label := packageClassificationLabel(classification)
ignoreOptions = append(ignoreOptions, huh.NewOption(label, classification.ID))
if classification.Leaf {
leafOptions = append(leafOptions, huh.NewOption(label, classification.ID))
}
if classification.Leaf && classification.Role == state.RoleRequired {
requiredLeafIDs = append(requiredLeafIDs, classification.ID)
}
if classification.Role == state.RoleIgnored {
ignoredIDs = append(ignoredIDs, classification.ID)
}
}
fields := []huh.Field{
huh.NewNote().
Title("Package graph classification").
Description(buildPackageClassificationDescription(s)),
}
if len(leafOptions) > 0 {
fields = append(fields,
huh.NewMultiSelect[string]().
Title("Leaf packages to keep as required").
Description("Leaf nodes are packages nothing else in the discovered graph depends on. Selected leaves become required; unselected leaves fall back to transitive.").
Options(leafOptions...).
Filtering(true).
Height(10).
Value(&requiredLeafIDs),
)
}
fields = append(fields,
huh.NewMultiSelect[string]().
Title("Packages Lucy should ignore").
Description("Ignored packages remain visible in state, but Lucy will leave them outside managed sync.").
Options(ignoreOptions...).
Filtering(true).
Height(10).
Value(&ignoredIDs),
)
classificationForm := huh.NewForm(huh.NewGroup(fields...))
if err := classificationForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("package classification step: %w", err)
}
applyTakeoverPackageSelections(s, requiredLeafIDs, ignoredIDs)
}
s.CurrentStep = StepReview
summary := buildSummary(s)
var confirmWrite bool
reviewForm := huh.NewForm(
huh.NewGroup(
huh.NewNote().
Title("Review – Ready to initialize").
Description(summary),
huh.NewConfirm().
Title("Write these files?").
Affirmative("Yes, initialize").
Negative("Cancel").
Value(&confirmWrite),
),
)
if err := reviewForm.Run(); err != nil {
if isUserAbort(err) {
s.Aborted = true
return nil
}
return fmt.Errorf("review step: %w", err)
}
if !confirmWrite {
s.Aborted = true
return nil
}
s.Confirmed = true
s.CurrentStep = StepDone
return nil
}
func buildSummary(s *InitFlowState) string {
var sb strings.Builder
if s.DiscoveredDefaults.Confidence != ConfidenceNone {
sb.WriteString("Observed server facts\n")
sb.WriteString("─────────────────────\n")
obs := s.DiscoveredDefaults
if obs.GameVersion != "" {
_, _ = fmt.Fprintf(&sb, " Game version: %s\n", obs.GameVersion)
} else {
sb.WriteString(" Game version: (not detected)\n")
}
if obs.Platform != "" && obs.Platform != "none" {
_, _ = fmt.Fprintf(&sb, " Runtime: %s", obs.Platform)
if obs.PlatformVersion != "" {
_, _ = fmt.Fprintf(&sb, " %s", obs.PlatformVersion)
}
sb.WriteString("\n")
} else {
sb.WriteString(" Runtime: (not detected)\n")
}
if len(obs.ManagedRoots) > 0 {
_, _ = fmt.Fprintf(&sb, " Directories: %s\n", strings.Join(obs.ManagedRoots, ", "))
}
if len(obs.DetectedPackages) > 0 {
_, _ = fmt.Fprintf(&sb, " Packages: %d detected\n", len(obs.DetectedPackages))
}
_, _ = fmt.Fprintf(&sb, " Confidence: %s\n", obs.Confidence)
sb.WriteString("\n")
}
sb.WriteString("Proposed manifest intent\n")
sb.WriteString("────────────────────────\n")
_, _ = fmt.Fprintf(&sb, " Game version: %s\n", s.GameVersion)
if s.Platform == "" || s.Platform == "none" {
sb.WriteString(" Primary runtime: none (vanilla)\n")
} else {
_, _ = fmt.Fprintf(&sb, " Primary runtime: %s\n", s.Platform)
if s.PlatformVersion != "" {
_, _ = fmt.Fprintf(&sb, " Loader version: %s\n", s.PlatformVersion)
} else {
sb.WriteString(" Loader version: (latest)\n")
}
}
if len(s.CompatiblePlatforms) > 0 {
_, _ = fmt.Fprintf(&sb, " Compatible with: %s\n", strings.Join(s.CompatiblePlatforms, ", "))
}
if len(s.ManagedRoots) > 0 {
_, _ = fmt.Fprintf(&sb, " Managed dirs: %s\n", strings.Join(s.ManagedRoots, ", "))
} else {
sb.WriteString(" Managed dirs: (none selected)\n")
}
_, _ = fmt.Fprintf(&sb, " Conflict mode: %s\n", s.ConflictResolution)
if len(s.ExistingFiles) > 0 {
_, _ = fmt.Fprintf(&sb, " Existing files: %s (will be %s)\n",
strings.Join(s.ExistingFiles, ", "),
conflictModeVerb(s.ConflictResolution),
)
}
sb.WriteString("\n")
if len(s.PackageClassifications) > 0 {
sb.WriteString("Package roles\n")
sb.WriteString("─────────────\n")
var required, transitive, ignored []string
for _, classification := range s.PackageClassifications {
entry := fmt.Sprintf("%s (%s)", classification.ID, packageClassificationKind(classification))
switch classification.Role {
case state.RoleRequired:
required = append(required, entry)
case state.RoleIgnored:
ignored = append(ignored, entry)
default:
transitive = append(transitive, entry)
}
}
if len(required) > 0 {
_, _ = fmt.Fprintf(&sb, " Required: %s\n", strings.Join(required, ", "))
}
if len(transitive) > 0 {
_, _ = fmt.Fprintf(&sb, " Transitive: %s\n", strings.Join(transitive, ", "))
}
if len(ignored) > 0 {
_, _ = fmt.Fprintf(&sb, " Ignored: %s\n", strings.Join(ignored, ", "))
}
sb.WriteString("\n")
}
divergences := buildTakeoverDivergences(s)
if len(divergences) > 0 || len(s.ExistingStateConflicts) > 0 {
sb.WriteString("Conflicts\n")
sb.WriteString("─────────\n")
for _, d := range divergences {
_, _ = fmt.Fprintf(&sb, " ! %s\n", d)
}
for _, c := range s.ExistingStateConflicts {
_, _ = fmt.Fprintf(&sb, " ! %s\n", c)
}
sb.WriteString("\n")
}
sb.WriteString("Files to create:\n")
sb.WriteString(" .lucy/config.toml\n")
sb.WriteString(" .lucy/manifest.toml\n")
sb.WriteString(" .lucy/lock.json\n")
return sb.String()
}
func buildTakeoverDivergences(s *InitFlowState) []string {
hints := s.DiscoveredDefaults.ExistingLucy
if !hints.HasAny() {
return nil
}
obs := s.DiscoveredDefaults
var divergences []string
if obs.GameVersion != "" && hints.GameVersion != "" && obs.GameVersion != hints.GameVersion {
divergences = append(divergences, fmt.Sprintf(
"Game version: observed %q but existing manifest says %q — will use %q",
obs.GameVersion, hints.GameVersion, s.GameVersion,
))
}
if obs.Platform != "" && hints.Platform != "" && obs.Platform != hints.Platform {
divergences = append(divergences, fmt.Sprintf(
"Runtime: observed %q but existing manifest says %q — will use %q",
obs.Platform, hints.Platform, s.Platform,
))
}
if obs.PlatformVersion != "" && hints.PlatformVersion != "" && obs.PlatformVersion != hints.PlatformVersion {
divergences = append(divergences, fmt.Sprintf(
"Loader version: observed %q but existing manifest says %q — will use %q",
obs.PlatformVersion, hints.PlatformVersion, s.PlatformVersion,
))
}
return divergences
}
func conflictModeVerb(mode ConflictMode) string {
switch mode {
case OverwriteAll:
return "overwritten"
case AbortOnConflict:
return "preserved (abort if any exist)"
default:
return "preserved"
}
}
func isUserAbort(err error) bool {
if err == nil {
return false
}
return errors.Is(err, huh.ErrUserAborted)
}
func compatiblePlatformLabel(platform string) string {
switch platform {
case "fabric":
return "Fabric compatibility – allow Fabric-targeted content through a bridge/runtime layer"
case "mcdr":
return "MCDR – independent controller / plugin framework"
case "sinytra":
return "Sinytra – NeoForge bridge layer for Fabric compatibility"
default:
return platform
}
}
func buildPackageClassificationDescription(s *InitFlowState) string {
if len(s.PackageClassifications) == 0 {
return "No discovered packages need classification."
}
var sb strings.Builder
sb.WriteString("Lucy built a package graph from the current server before writing the manifest. Non-leaf nodes are shown as dependencies only; they do not get a separate persistent role.\n\n")
for _, classification := range s.PackageClassifications {
_, _ = fmt.Fprintf(&sb, "- %s\n", packageClassificationLabel(classification))
}
return strings.TrimRight(sb.String(), "\n")
}
func packageClassificationLabel(classification TakeoverPackageClassification) string {
label := fmt.Sprintf("[%s] %s@%s", packageClassificationKind(classification), classification.ID, classification.Version)
if len(classification.RequiredBy) > 0 {
label += fmt.Sprintf(" <- %s", strings.Join(classification.RequiredBy, ", "))
}
return label
}
func packageClassificationKind(classification TakeoverPackageClassification) string {
if classification.Leaf {
return "leaf"
}
return "dependency"
}
package cmd
import (
"fmt"
"strings"
"github.com/mclucy/lucy/types"
)
func ResolvePlatform(fromQuery types.Platform, fromFlag string) (types.Platform, error) {
if fromFlag == "" {
return fromQuery, nil
}
platform := types.Platform(strings.ToLower(strings.TrimSpace(fromFlag)))
if !platform.IsSearchPlatform() {
return types.PlatformAny, fmt.Errorf("invalid --platform %s", fromFlag)
}
if fromQuery == types.PlatformAny {
return platform, nil
}
if fromQuery != platform {
return types.PlatformAny, fmt.Errorf("--platform %s conflicts with query prefix %s", platform, fromQuery)
}
return platform, nil
}
package cmd
import (
"fmt"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "lucy",
Short: "The takeover-first Minecraft server package manager",
SilenceUsage: true,
SilenceErrors: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if noStyle, _ := cmd.Flags().GetBool(flagNoStyleName); noStyle {
tools.TurnOffStyles()
}
if logFile, _ := cmd.Flags().GetBool(flagLogFileName); logFile {
fmt.Println("Log file at", logger.GetLogFile().Name())
}
if printLogs, _ := cmd.Flags().GetBool(flagPrintLogsName); printLogs {
logger.EnablePrintLogs()
}
if debug, _ := cmd.Flags().GetBool(flagDebugName); debug {
logger.EnableDebug()
}
if dumpLogs, _ := cmd.Flags().GetBool(flagDumpLogsName); dumpLogs {
logger.EnableDumpHistory()
}
return nil
},
}
func init() {
rootCmd.PersistentFlags().Bool(flagDebugName, false, "Show debug logs")
rootCmd.PersistentFlags().Bool(flagLogFileName, false, "Output the path to logfile")
rootCmd.PersistentFlags().Bool(flagPrintLogsName, false, "Print logs to console")
rootCmd.PersistentFlags().Bool(flagDumpLogsName, false, "Dump the log history to console before exit")
_ = rootCmd.PersistentFlags().MarkHidden(flagDumpLogsName)
rootCmd.PersistentFlags().Bool(flagNoStyleName, false, "Disable colored and styled output")
rootCmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
fmt.Fprintln(cmd.ErrOrStderr(), err)
cmd.Usage()
return err
})
}
// runWithErrorLogging wraps a RunE function to log errors via logger.ReportError.
// It replaces the decoratorLogAndExitOnError decorator.
func runWithErrorLogging(fn func(cmd *cobra.Command, args []string) error) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
err := fn(cmd, args)
if err != nil {
logger.ReportError(err)
}
return err
}
}
// Execute runs the root command.
func Execute() error {
return rootCmd.Execute()
}
package dependency
import (
"strconv"
"strings"
"github.com/mclucy/lucy/types"
)
func parseMinecraftSnapshot(s types.RawVersion) types.ComparableVersion {
if v := parsePre26WeekSnapshot(s); v != nil {
return v
}
return parsePost26MinecraftSnapshot(s)
}
func parsePre26WeekSnapshot(s types.RawVersion) types.ComparableVersion {
if len(s) < 4 {
return nil
}
index := s[len(s)-1]
v := parseSnapshotWorkCycle(s[:len(s)-1])
if v == nil {
return nil
}
if index < minSnapshotIndex || index > maxSnapshotIndex {
return nil
}
v.Index = index
if !v.Validate() {
return nil
}
return v
}
func parseSnapshotWorkCycle(s types.RawVersion) *Pre26MinecraftSnapshotVersion {
tokens := strings.Split(string(s), "w")
if len(tokens) != 2 {
return nil
}
year, ok := parseUint8(tokens[0])
if !ok {
return nil
}
workCycle, ok := parseUint8(tokens[1])
if !ok {
return nil
}
return &Pre26MinecraftSnapshotVersion{
Year: year,
WorkCycle: workCycle,
Index: 0,
}
}
func parsePost26MinecraftSnapshot(s types.RawVersion) types.ComparableVersion {
str := string(s)
if !strings.Contains(str, "-snapshot-") {
return nil
}
tokens := strings.Split(str, "-snapshot-")
if len(tokens) != 2 {
return nil
}
core := strings.Split(tokens[0], ".")
if len(core) != 2 {
return nil
}
year, ok := parseUint8(core[0])
if !ok {
return nil
}
update, ok := parseUint8(core[1])
if !ok {
return nil
}
snapshotN, ok := parseUint8(tokens[1])
if !ok {
return nil
}
v := &Post26MinecraftSnapshotVersion{
Year: year,
Update: update,
SnapshotN: snapshotN,
}
if !v.Validate() {
return nil
}
return v
}
func parseMinecraftRelease(s types.RawVersion) types.ComparableVersion {
if v := parsePost26MinecraftRelease(s); v != nil {
return v
}
return parsePre26MinecraftRelease(s)
}
func parsePost26MinecraftRelease(s types.RawVersion) types.ComparableVersion {
str := string(s)
if str == "" || strings.Contains(str, "-snapshot-") {
return nil
}
core, suffix := splitCoreAndSuffix(str)
coreTokens := strings.Split(core, ".")
if len(coreTokens) < 2 || len(coreTokens) > 3 {
return nil
}
year, ok := parseUint8(coreTokens[0])
if !ok || year < minPost26ReleaseYear {
return nil
}
update, ok := parseUint8(coreTokens[1])
if !ok {
return nil
}
v := &MinecraftVersion{
Year: year,
Update: update,
Post26: true,
}
if len(coreTokens) == 3 {
hotfix, ok := parseUint8(coreTokens[2])
if !ok {
return nil
}
v.Hotfix = hotfix
}
if suffix != "" {
prerelease, number, ok := parseMinecraftPrereleaseSuffix(suffix)
if !ok {
return nil
}
v.Prerelease = prerelease
v.PrereleaseNumber = number
}
if !v.Validate() {
return nil
}
return v
}
func parsePre26MinecraftRelease(s types.RawVersion) types.ComparableVersion {
str := string(s)
if str == "" {
return nil
}
core, suffix := splitCoreAndSuffix(str)
coreTokens := strings.Split(core, ".")
if len(coreTokens) < 2 || len(coreTokens) > 3 {
return nil
}
year, ok := parseUint8(coreTokens[0])
if !ok || year >= minPost26ReleaseYear {
return nil
}
update, ok := parseUint8(coreTokens[1])
if !ok {
return nil
}
v := &MinecraftVersion{
Year: year,
Update: update,
Post26: false,
}
if len(coreTokens) == 3 {
hotfix, ok := parseUint8(coreTokens[2])
if !ok {
return nil
}
v.Hotfix = hotfix
}
if suffix != "" {
prerelease, number, ok := parseMinecraftPrereleaseSuffix(suffix)
if !ok {
return nil
}
v.Prerelease = prerelease
v.PrereleaseNumber = number
}
if !v.Validate() {
return nil
}
return v
}
func splitCoreAndSuffix(raw string) (core string, suffix string) {
core = raw
if idx := strings.IndexByte(raw, '-'); idx >= 0 {
core = raw[:idx]
suffix = raw[idx+1:]
}
return core, suffix
}
func parseMinecraftPrereleaseSuffix(suffix string) (
PrereleaseType,
uint8,
bool,
) {
if strings.HasPrefix(suffix, "pre") {
number, ok := parsePrereleaseNumber(strings.TrimPrefix(suffix, "pre"))
if !ok {
return "", 0, false
}
return Post26Prerelease, number, true
}
if strings.HasPrefix(suffix, "rc") {
number, ok := parsePrereleaseNumber(strings.TrimPrefix(suffix, "rc"))
if !ok {
return "", 0, false
}
return Post26ReleaseCandidate, number, true
}
return "", 0, false
}
func parsePrereleaseNumber(s string) (uint8, bool) {
s = strings.TrimPrefix(s, "-")
if s == "" {
return 0, false
}
n, ok := parseUint8(s)
if !ok || n == 0 {
return 0, false
}
return n, true
}
func parseUint8(s string) (uint8, bool) {
if s == "" {
return 0, false
}
v, err := strconv.Atoi(s)
if err != nil || v < 0 || v > 255 {
return 0, false
}
return uint8(v), true
}
package dependency
import (
"strconv"
"github.com/mclucy/lucy/types"
)
const (
maxSnapshotWeek = 54
maxSnapshotIndex = uint8('h')
minSnapshotIndex = uint8('a')
minPost26ReleaseYear = 26
)
// docs:
// https://zh.minecraft.wiki/w/%E7%89%88%E6%9C%AC%E6%A0%BC%E5%BC%8F
// https://www.minecraft.net/en-us/article/minecraft-new-version-numbering-system
type Pre26MinecraftSnapshotVersion struct {
Year uint8 // the last two digits of the year, e.g. 24 for 2024
WorkCycle uint8 // the week of the year
Index uint8 // the letter, stored as ASCII values directly
}
func (v1 *Pre26MinecraftSnapshotVersion) Compare(v2 types.ComparableVersion) (
int,
bool,
) {
o, ok := v2.(*Pre26MinecraftSnapshotVersion)
if !ok || v1 == nil || o == nil {
return 0, false
}
if cmp := compareUint8(v1.Year, o.Year); cmp != 0 {
return cmp, true
}
if cmp := compareUint8(v1.WorkCycle, o.WorkCycle); cmp != 0 {
return cmp, true
}
return compareUint8(v1.Index, o.Index), true
}
func (v1 *Pre26MinecraftSnapshotVersion) Validate() bool {
if v1 == nil {
return false
}
return v1.Year != 0 &&
v1.WorkCycle > 0 && v1.WorkCycle <= maxSnapshotWeek &&
v1.Index >= minSnapshotIndex && v1.Index <= maxSnapshotIndex
}
func (v1 *Pre26MinecraftSnapshotVersion) Scheme() types.VersionScheme {
return types.MinecraftSnapshot
}
// Post26MinecraftSnapshotVersion represents snapshots after the new numbering
// scheme (for example 26.1-snapshot-1).
type Post26MinecraftSnapshotVersion struct {
Year uint8
Update uint8
SnapshotN uint8
}
func (v1 *Post26MinecraftSnapshotVersion) Compare(v2 types.ComparableVersion) (
int,
bool,
) {
o, ok := v2.(*Post26MinecraftSnapshotVersion)
if !ok || v1 == nil || o == nil {
return 0, false
}
if cmp := compareUint8(v1.Year, o.Year); cmp != 0 {
return cmp, true
}
if cmp := compareUint8(v1.Update, o.Update); cmp != 0 {
return cmp, true
}
return compareUint8(v1.SnapshotN, o.SnapshotN), true
}
func (v1 *Post26MinecraftSnapshotVersion) Validate() bool {
if v1 == nil {
return false
}
return v1.Year >= minPost26ReleaseYear &&
v1.Update > 0 &&
v1.SnapshotN > 0
}
func (v1 *Post26MinecraftSnapshotVersion) Scheme() types.VersionScheme {
return types.MinecraftSnapshot
}
type MinecraftVersion struct {
Year uint8
Update uint8
Hotfix uint8
Prerelease PrereleaseType
PrereleaseNumber uint8
Post26 bool
}
type PrereleaseType string
const (
Post26Prerelease PrereleaseType = "pre"
Post26ReleaseCandidate PrereleaseType = "rc"
)
func (v *MinecraftVersion) Title() string {
if v == nil {
return ""
}
if v.Post26 {
base := strconv.Itoa(int(v.Year)) + "." + strconv.Itoa(int(v.Update))
if v.Prerelease != "" {
return base + "-" + string(v.Prerelease) + "-" + strconv.Itoa(int(v.PrereleaseNumber))
}
if v.Hotfix > 0 {
return base + "." + strconv.Itoa(int(v.Hotfix))
}
return base
}
base := strconv.Itoa(int(v.Year)) + "." + strconv.Itoa(int(v.Update))
if v.Hotfix > 0 {
base += "." + strconv.Itoa(int(v.Hotfix))
}
if v.Prerelease != "" {
return base + "-" + string(v.Prerelease) + strconv.Itoa(int(v.PrereleaseNumber))
}
return base
}
func (v1 *MinecraftVersion) Compare(v2 types.ComparableVersion) (int, bool) {
o, ok := v2.(*MinecraftVersion)
if !ok || v1 == nil || o == nil {
return 0, false
}
if v1.Post26 != o.Post26 {
return 0, false
}
if cmp := compareUint8(v1.Year, o.Year); cmp != 0 {
return cmp, true
}
if cmp := compareUint8(v1.Update, o.Update); cmp != 0 {
return cmp, true
}
if cmp := compareUint8(v1.Hotfix, o.Hotfix); cmp != 0 {
return cmp, true
}
if cmp := compareUint8(
prereleaseRank(v1.Prerelease),
prereleaseRank(o.Prerelease),
); cmp != 0 {
return cmp, true
}
return compareUint8(v1.PrereleaseNumber, o.PrereleaseNumber), true
}
func (v1 *MinecraftVersion) Validate() bool {
if v1 == nil {
return false
}
if v1.Year == 0 || v1.Update == 0 {
return false
}
if v1.Post26 && v1.Year < minPost26ReleaseYear {
return false
}
switch v1.Prerelease {
case "":
return true
case Post26Prerelease, Post26ReleaseCandidate:
if v1.PrereleaseNumber == 0 {
return false
}
if v1.Post26 {
return v1.Hotfix == 0
}
return true
default:
return false
}
}
func (v1 *MinecraftVersion) Scheme() types.VersionScheme {
return types.MinecraftRelease
}
func prereleaseRank(pr PrereleaseType) uint8 {
switch pr {
case Post26Prerelease:
return 0
case Post26ReleaseCandidate:
return 1
default:
return 2
}
}
func compareUint8(a, b uint8) int {
if a < b {
return -1
}
if a > b {
return 1
}
return 0
}
package dependency
import (
"fmt"
"github.com/mclucy/lucy/types"
)
// ErrAmbiguousVersion is returned when attempting to parse an ambiguous version constant.
var ErrAmbiguousVersion = fmt.Errorf("attempting to parse an ambiguous version")
// Parse is the main function to parse a RawVersion into a ComparableVersion.
//
// If the raw version is one of the special constants (which should be inferred
// before passing to this function), it returns an error.
//
// It dispatches parsing by version scheme and returns nil when parsing fails.
func Parse(
raw types.RawVersion,
scheme types.VersionScheme,
) (types.ComparableVersion, error) {
switch raw {
case types.VersionLatest, types.VersionCompatible, types.VersionNone, types.VersionAny, types.VersionUnknown:
return nil, fmt.Errorf("%w: %s", ErrAmbiguousVersion, raw)
}
switch scheme {
case types.Semver:
return parseSemver(raw), nil
case types.MinecraftRelease:
return parseMinecraftRelease(raw), nil
case types.MinecraftSnapshot:
return parseMinecraftSnapshot(raw), nil
default:
return nil, nil
}
}
package dependency
import (
"strings"
"github.com/mclucy/lucy/types"
)
// VersionRangeDialect defines the grammar used when parsing a range string.
type VersionRangeDialect uint8
const (
DialectUnknown VersionRangeDialect = iota
// DialectNpmSemver represents MCDR plugin metadata dependency ranges.
// References:
// - https://docs.mcdreforged.com/en/latest/plugin_dev/metadata.html
// - https://docs.npmjs.com/about-semantic-versioning
DialectNpmSemver
// DialectFabricSemver represents Fabric loader range syntax.
// Reference: https://wiki.fabricmc.net/documentation:fabric_mod_json_spec
DialectFabricSemver
// DialectMavenRange are Maven version ranges in mods.toml used by Forge and NeoForge.
// References:
// - https://docs.minecraftforge.net/en/latest/gettingstarted/modfiles/
// - https://maven.apache.org/enforcer/enforcer-rules/versionRanges.html
DialectMavenRange
)
// InferRangeDialect infers the range dialect from package platform.
func InferRangeDialect(platform types.Platform) VersionRangeDialect {
switch platform {
case types.PlatformMCDR:
return DialectNpmSemver
case types.PlatformFabric:
return DialectFabricSemver
case types.PlatformForge, types.PlatformNeoforge:
return DialectMavenRange
default:
return DialectUnknown
}
}
// ParseRange parses range text using the given dialect.
//
// This parser layer is the intended home for syntax-specific operators such as
// '^' and '~'. It expands these operators into basic comparison constraints
// (>, >=, <, <=, =, !=) so that the evaluator layer stays dialect-agnostic.
func ParseRange(
raw string,
dialect VersionRangeDialect,
scheme types.VersionScheme,
) types.VersionConstraintExpression {
if strings.TrimSpace(raw) == "" {
return nil
}
switch dialect {
case DialectNpmSemver:
if scheme != types.Semver {
return nil
}
// MCDR uses space-separated criteria (AND) with operators
// >=, >, <=, <, =, ==, ^, ~ and wildcard versions.
// Reference: https://docs.mcdreforged.com/en/latest/plugin_dev/metadata.html
return parseMcdrSemverRange(raw)
case DialectFabricSemver:
if scheme != types.Semver {
return nil
}
// Fabric semantics: '^' keeps same-major behavior without 0.x special-casing.
// Reference: https://wiki.fabricmc.net/documentation:fabric_mod_json_spec
return parseSemverRange(
raw,
semverRangeOptions{caretMode: caretModeSameMajor},
)
case DialectMavenRange:
if scheme != types.Semver {
return nil
}
return parseMavenRange(raw)
default:
return nil
}
}
// ParseRanges parses multiple range strings as OR alternatives.
//
// This matches the VersionConstraintExpression design where the outer slice
// represents OR clauses and each inner slice represents AND constraints.
//
// If any item resolves to an unconstrained expression (nil/empty), the result
// is unconstrained (nil), because one OR branch already matches all versions.
func ParseRanges(
raws []string,
dialect VersionRangeDialect,
scheme types.VersionScheme,
) types.VersionConstraintExpression {
if len(raws) == 0 {
return nil
}
merged := make(types.VersionConstraintExpression, 0, len(raws))
for _, raw := range raws {
expr := ParseRange(raw, dialect, scheme)
if len(expr) == 0 {
return nil
}
merged = append(merged, expr...)
}
if len(merged) == 0 {
return nil
}
return merged
}
package dependency
import (
"strings"
"github.com/mclucy/lucy/types"
)
func parseMavenRange(raw string) types.VersionConstraintExpression {
raw = strings.TrimSpace(raw)
if raw == "" || raw == "*" || strings.EqualFold(raw, "none") {
return nil
}
parts := splitMavenUnions(raw)
if len(parts) == 0 {
parts = []string{raw}
}
result := make(types.VersionConstraintExpression, 0, len(parts))
for _, part := range parts {
constraints := parseMavenSingleRange(strings.TrimSpace(part))
if len(constraints) == 0 {
continue
}
result = append(result, constraints)
}
if len(result) == 0 {
return nil
}
return result
}
func splitMavenUnions(raw string) []string {
var out []string
depth := 0
start := 0
for i := 0; i < len(raw); i++ {
switch raw[i] {
case '[', '(':
depth++
case ']', ')':
if depth > 0 {
depth--
}
case ',':
if depth == 0 {
part := strings.TrimSpace(raw[start:i])
if part != "" {
out = append(out, part)
}
start = i + 1
}
}
}
if start < len(raw) {
part := strings.TrimSpace(raw[start:])
if part != "" {
out = append(out, part)
}
}
return out
}
func parseMavenSingleRange(raw string) []types.VersionConstraint {
if raw == "" {
return nil
}
if strings.HasPrefix(raw, "^") || strings.HasPrefix(raw, "~") {
// Not part of Maven version range syntax.
// Forge docs (1.21.x, checked 2026-02-24) point dependency versionRange
// to Maven Version Range syntax, which only defines bracket/parenthesis
// ranges and basic comparison operators.
// References:
// - https://docs.minecraftforge.net/en/latest/gettingstarted/modfiles/
// - https://maven.apache.org/enforcer/enforcer-rules/versionRanges.html
return nil
}
if len(raw) >= 2 {
left := raw[0]
right := raw[len(raw)-1]
if (left == '[' || left == '(') && (right == ']' || right == ')') {
body := strings.TrimSpace(raw[1 : len(raw)-1])
if strings.Contains(body, ",") {
bounds := strings.SplitN(body, ",", 2)
lowerToken := strings.TrimSpace(bounds[0])
upperToken := strings.TrimSpace(bounds[1])
out := make([]types.VersionConstraint, 0, 2)
if lowerToken != "" {
lower := parseSemver(types.RawVersion(lowerToken))
if lower == nil {
return nil
}
op := types.OpGt
if left == '[' {
op = types.OpGte
}
out = append(
out,
types.VersionConstraint{Value: lower, Operator: op},
)
}
if upperToken != "" {
upper := parseSemver(types.RawVersion(upperToken))
if upper == nil {
return nil
}
op := types.OpLt
if right == ']' {
op = types.OpLte
}
out = append(
out,
types.VersionConstraint{Value: upper, Operator: op},
)
}
if len(out) == 0 {
return nil
}
return out
}
// Exact value form: [1.0]
if left == '[' && right == ']' && body != "" {
v := parseSemver(types.RawVersion(body))
if v == nil {
return nil
}
return []types.VersionConstraint{
{
Value: v, Operator: types.OpEq,
},
}
}
return nil
}
}
operator := types.OpEq
versionToken := raw
for _, op := range []struct {
prefix string
operator types.VersionOperator
}{
{prefix: ">=", operator: types.OpGte},
{prefix: "<=", operator: types.OpLte},
{prefix: "!=", operator: types.OpNeq},
{prefix: ">", operator: types.OpGt},
{prefix: "<", operator: types.OpLt},
{prefix: "=", operator: types.OpEq},
} {
if strings.HasPrefix(raw, op.prefix) {
operator = op.operator
versionToken = strings.TrimSpace(strings.TrimPrefix(raw, op.prefix))
break
}
}
v := parseSemver(types.RawVersion(versionToken))
if v == nil {
return nil
}
return []types.VersionConstraint{{Value: v, Operator: operator}}
}
package dependency
import (
"strconv"
"strings"
"github.com/mclucy/lucy/types"
)
type caretMode uint8
const (
caretModeNpm caretMode = iota
caretModeSameMajor
)
type semverRangeOptions struct {
caretMode caretMode
}
// parseMcdrSemverRange parses MCDR dependency range expressions strictly by
// metadata docs: space-separated criteria (AND) with operators
// >=, >, <=, <, =, ==, ^, ~ and wildcard base versions.
// Reference: https://docs.mcdreforged.com/en/latest/plugin_dev/metadata.html
func parseMcdrSemverRange(raw string) types.VersionConstraintExpression {
raw = strings.TrimSpace(raw)
if raw == "" || isWildcardToken(raw) {
return nil
}
tokens := strings.Fields(raw)
if len(tokens) == 0 {
return nil
}
andConstraints := make([]types.VersionConstraint, 0, len(tokens))
for _, token := range tokens {
constraints, ok := parseMcdrSemverCriterion(token)
if !ok {
return nil
}
andConstraints = append(andConstraints, constraints...)
}
if len(andConstraints) == 0 {
return nil
}
return types.VersionConstraintExpression{andConstraints}
}
func parseMcdrSemverCriterion(raw string) ([]types.VersionConstraint, bool) {
raw = strings.TrimSpace(raw)
if raw == "" || isWildcardToken(raw) {
return nil, true
}
operator := ""
versionToken := raw
for _, op := range []string{">=", "<=", "==", ">", "<", "=", "^", "~"} {
if strings.HasPrefix(raw, op) {
operator = op
versionToken = strings.TrimSpace(strings.TrimPrefix(raw, op))
break
}
}
if versionToken == "" {
return nil, false
}
if strings.ContainsAny(versionToken, "xX*") {
switch operator {
case "", "=", "==":
constraints := parseXRange(versionToken)
if constraints == nil {
return nil, false
}
return constraints, true
default:
// Keep implementation strict to documented examples.
return nil, false
}
}
lower := parseSemver(types.RawVersion(versionToken))
if lower == nil {
return nil, false
}
switch operator {
case "", "=", "==":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpEq,
},
}, true
case ">":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpGt,
},
}, true
case ">=":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpGte,
},
}, true
case "<":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpLt,
},
}, true
case "<=":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpLte,
},
}, true
case "^":
constraints := parseCaretRangeFromSemver(lower, caretModeSameMajor)
return constraints, len(constraints) > 0
case "~":
constraints := parseTildeRangeFromSemver(lower, versionToken)
return constraints, len(constraints) > 0
default:
return nil, false
}
}
func parseSemverRange(
raw string,
options semverRangeOptions,
) types.VersionConstraintExpression {
raw = strings.TrimSpace(raw)
if raw == "" || isWildcardToken(raw) {
return nil
}
orParts := strings.Split(raw, "||")
result := make(types.VersionConstraintExpression, 0, len(orParts))
for _, part := range orParts {
part = strings.TrimSpace(part)
if part == "" {
continue
}
if strings.Contains(part, " - ") {
rangeConstraints := parseSemverHyphenRange(part)
if len(rangeConstraints) == 0 {
continue
}
result = append(result, rangeConstraints)
continue
}
tokens := strings.Fields(part)
if len(tokens) == 0 {
continue
}
andConstraints := make([]types.VersionConstraint, 0, len(tokens))
valid := true
for _, token := range tokens {
tokenConstraints, ok := parseSemverToken(token, options)
if !ok {
valid = false
break
}
andConstraints = append(andConstraints, tokenConstraints...)
}
if !valid {
continue
}
if len(andConstraints) == 0 {
// all wildcard/no-op constraints => no constraint for the whole range
return nil
}
result = append(result, andConstraints)
}
if len(result) == 0 {
return nil
}
return result
}
func parseSemverHyphenRange(raw string) []types.VersionConstraint {
tokens := strings.SplitN(raw, " - ", 2)
if len(tokens) != 2 {
return nil
}
left := parseSemver(types.RawVersion(strings.TrimSpace(tokens[0])))
right := parseSemver(types.RawVersion(strings.TrimSpace(tokens[1])))
if left == nil || right == nil {
return nil
}
return []types.VersionConstraint{
{Value: left, Operator: types.OpGte},
{Value: right, Operator: types.OpLte},
}
}
func parseSemverToken(
raw string,
options semverRangeOptions,
) ([]types.VersionConstraint, bool) {
raw = strings.TrimSpace(raw)
if raw == "" || isWildcardToken(raw) {
return nil, true
}
// comma-separated comparators within one token are treated as AND
if strings.Contains(raw, ",") {
parts := strings.Split(raw, ",")
all := make([]types.VersionConstraint, 0, len(parts))
for _, part := range parts {
constraints, ok := parseSemverToken(part, options)
if !ok {
return nil, false
}
all = append(all, constraints...)
}
return all, true
}
for _, op := range []string{
">=", "<=", "!=", "==", ">", "<", "=", "^", "~",
} {
if strings.HasPrefix(raw, op) {
versionToken := strings.TrimSpace(strings.TrimPrefix(raw, op))
if versionToken == "" {
return nil, false
}
return parseSemverOperator(op, versionToken, options)
}
}
if strings.ContainsAny(raw, "xX*") {
constraints := parseXRange(raw)
if constraints == nil && !isWildcardToken(raw) {
return nil, false
}
return constraints, true
}
v := parseSemver(types.RawVersion(raw))
if v == nil {
return nil, false
}
return []types.VersionConstraint{{Value: v, Operator: types.OpEq}}, true
}
func parseSemverOperator(
op string,
versionToken string,
options semverRangeOptions,
) ([]types.VersionConstraint, bool) {
if strings.ContainsAny(versionToken, "xX*") {
switch op {
case "=":
constraints := parseXRange(versionToken)
if constraints == nil {
return nil, false
}
return constraints, true
case "!=":
// This desugars to an OR expression and is intentionally unsupported in
// the single-token operator parser.
return nil, false
}
}
lower := parseSemver(types.RawVersion(versionToken))
if lower == nil && strings.ContainsAny(versionToken, "xX*") {
lower = parseSemverLowerBoundFromWildcard(versionToken)
}
if lower == nil {
return nil, false
}
switch op {
case "==":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpEq,
},
}, true
case "=":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpEq,
},
}, true
case "!=":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpNeq,
},
}, true
case ">":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpGt,
},
}, true
case ">=":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpGte,
},
}, true
case "<":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpLt,
},
}, true
case "<=":
return []types.VersionConstraint{
{
Value: lower, Operator: types.OpLte,
},
}, true
case "^":
return parseCaretRangeFromSemver(lower, options.caretMode), true
case "~":
return parseTildeRangeFromSemver(lower, versionToken), true
default:
return nil, false
}
}
func parseCaretRangeFromSemver(
lower types.ComparableVersion,
mode caretMode,
) []types.VersionConstraint {
sv, ok := lower.(*SemverVersion)
if !ok {
return nil
}
major := sv.Major()
minor := sv.Minor()
patch := sv.Patch()
var upper types.ComparableVersion
if mode == caretModeNpm {
if major > 0 {
upper = NewSemver(major+1, 0, 0)
} else if minor > 0 {
upper = NewSemver(0, minor+1, 0)
} else {
upper = NewSemver(0, 0, patch+1)
}
} else {
upper = NewSemver(major+1, 0, 0)
}
if upper == nil {
return nil
}
return []types.VersionConstraint{
{Value: lower, Operator: types.OpGte},
{Value: upper, Operator: types.OpLt},
}
}
func parseTildeRangeFromSemver(
lower types.ComparableVersion,
raw string,
) []types.VersionConstraint {
sv, ok := lower.(*SemverVersion)
if !ok {
return nil
}
parts := strings.Split(strings.TrimSpace(raw), ".")
hasMinorSpecified := len(parts) >= 2 && !isWildcardToken(parts[1])
var upper types.ComparableVersion
if !hasMinorSpecified {
upper = NewSemver(sv.Major()+1, 0, 0)
} else {
upper = NewSemver(sv.Major(), sv.Minor()+1, 0)
}
if upper == nil {
return nil
}
return []types.VersionConstraint{
{Value: lower, Operator: types.OpGte},
{Value: upper, Operator: types.OpLt},
}
}
func parseXRange(raw string) []types.VersionConstraint {
raw = strings.TrimSpace(raw)
if isWildcardToken(raw) {
return nil
}
parts := strings.Split(raw, ".")
if len(parts) == 0 {
return nil
}
if len(parts) >= 1 && isWildcardToken(parts[0]) {
return nil
}
major, ok := parseUint64(parts[0])
if !ok {
return nil
}
if len(parts) == 1 {
return nil
}
if len(parts) >= 2 && isWildcardToken(parts[1]) {
lower := NewSemver(major, 0, 0)
upper := NewSemver(major+1, 0, 0)
if lower == nil || upper == nil {
return nil
}
return []types.VersionConstraint{
{Value: lower, Operator: types.OpGte},
{Value: upper, Operator: types.OpLt},
}
}
minor, ok := parseUint64(parts[1])
if !ok {
return nil
}
if len(parts) >= 3 && isWildcardToken(parts[2]) {
lower := NewSemver(major, minor, 0)
upper := NewSemver(major, minor+1, 0)
if lower == nil || upper == nil {
return nil
}
return []types.VersionConstraint{
{Value: lower, Operator: types.OpGte},
{Value: upper, Operator: types.OpLt},
}
}
return nil
}
func parseSemverLowerBoundFromWildcard(raw string) types.ComparableVersion {
parts := strings.Split(strings.TrimSpace(raw), ".")
if len(parts) == 0 {
return nil
}
for i := range parts {
if isWildcardToken(parts[i]) {
parts[i] = "0"
}
}
for len(parts) < 3 {
parts = append(parts, "0")
}
if len(parts) > 3 {
parts = parts[:3]
}
return parseSemver(types.RawVersion(strings.Join(parts, ".")))
}
func isWildcardToken(token string) bool {
token = strings.TrimSpace(token)
if token == "" {
return true
}
return token == "*" || token == "x" || token == "X"
}
func parseUint64(token string) (uint64, bool) {
token = strings.TrimSpace(token)
if token == "" {
return 0, false
}
v, err := strconv.ParseUint(token, 10, 64)
if err != nil {
return 0, false
}
return v, true
}
package dependency
import (
"fmt"
semverlib "github.com/Masterminds/semver/v3"
"github.com/mclucy/lucy/types"
)
// SemverVersion implements types.ComparableVersion using Masterminds semver.
type SemverVersion semverlib.Version
// NewSemver creates a SemverVersion from explicit major, minor, patch values.
func NewSemver(major, minor, patch uint64) types.ComparableVersion {
v, err := semverlib.StrictNewVersion(
fmt.Sprintf("%d.%d.%d", major, minor, patch),
)
if err != nil {
return nil
}
return (*SemverVersion)(v)
}
// parseSemver parses a semver string.
func parseSemver(s types.RawVersion) types.ComparableVersion {
v, err := semverlib.NewVersion(string(s))
if err != nil {
return nil
}
return (*SemverVersion)(v)
}
func (s *SemverVersion) Major() uint64 {
if s == nil {
return 0
}
return (*semverlib.Version)(s).Major()
}
func (s *SemverVersion) Minor() uint64 {
if s == nil {
return 0
}
return (*semverlib.Version)(s).Minor()
}
func (s *SemverVersion) Patch() uint64 {
if s == nil {
return 0
}
return (*semverlib.Version)(s).Patch()
}
func (s *SemverVersion) Prerelease() string {
if s == nil {
return ""
}
return (*semverlib.Version)(s).Prerelease()
}
func (s *SemverVersion) Scheme() types.VersionScheme {
return types.Semver
}
func (s *SemverVersion) Compare(other types.ComparableVersion) (int, bool) {
if s == nil || other == nil {
return 0, false
}
o, ok := other.(*SemverVersion)
if !ok || o == nil {
return 0, false
}
return (*semverlib.Version)(s).Compare((*semverlib.Version)(o)), true
}
func (s *SemverVersion) Validate() bool {
if s == nil {
return false
}
return s.Major() != 0 || s.Minor() != 0 || s.Patch() != 0
}
func (s *SemverVersion) String() string {
if s == nil {
return ""
}
return (*semverlib.Version)(s).Original()
}
package exttype
import (
"encoding/json"
"github.com/mclucy/lucy/tools"
)
type FabricEnvironment string
const (
FabricEnvironmentClient FabricEnvironment = "client"
FabricEnvironmentServer FabricEnvironment = "server"
FabricEnvironmentAny FabricEnvironment = "*"
)
// FabricAuthor handles both a plain string and a person object {"name": "..."}
// as allowed by the fabric.mod.json spec.
type FabricAuthor string
func (a *FabricAuthor) UnmarshalJSON(data []byte) error {
var s string
if err := json.Unmarshal(data, &s); err == nil {
*a = FabricAuthor(s)
return nil
}
var obj struct {
Name string `json:"name"`
}
if err := json.Unmarshal(data, &obj); err != nil {
return err
}
*a = FabricAuthor(obj.Name)
return nil
}
// FileFabricModIdentifier represents the structure of fabric.mod.json files found
// in Fabric mods' `.jar` files.
//
// Docs: https://fabricmc.net/wiki/documentation:fabric_mod_json_spec
type FileFabricModIdentifier struct {
SchemaVersion int `json:"schemaVersion"`
Id string `json:"id"`
Version string `json:"version"`
Name string `json:"name"`
Description string `json:"description"`
Authors []FabricAuthor `json:"authors"`
// Fields officially supported:
// - "email"
// - "homepage"
// - "irc"
// - "issues"
// - "sources"
Contact map[string]string `json:"contact"`
// This uses the SPDX format https://spdx.org/licenses/
// TODO: Should implement and check whether other platforms use this too.
License string `json:"license"`
Icon string `json:"icon"`
Environment FabricEnvironment `json:"environment"`
Jars []struct {
File string `json:"file"`
} `json:"jars"`
Entrypoints map[string][]string `json:"-"`
Mixins []string `json:"-"`
AccessWidener string `json:"accessWidener"`
LanguageAdapters map[string]string `json:"-"`
// Depends > Recommends > Suggests
// Breaks > Conflicts
Depends map[string]tools.SingleOrSlice[string] `json:"depends"`
Recommends map[string]tools.SingleOrSlice[string] `json:"recommends"`
Suggests map[string]tools.SingleOrSlice[string] `json:"suggests"`
Breaks map[string]tools.SingleOrSlice[string] `json:"breaks"`
Conflicts map[string]tools.SingleOrSlice[string] `json:"conflicts"`
Custom interface{} `json:"-"`
}
type FileFabricModIdentifierOld struct {
// TODO: See https://wiki.fabricmc.net/documentation:fabric_mod_json_spec
// This is for very old fabric (< 0.4.0). It does not matter much right
// now. Besides, it is poorly documented.
//
// When SchemaVersion is 0 or missing, it is considered old.
}
package github
import (
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
)
// checkGitHubMessage checks if the response data is a GitHub API error message
// Returns the parsed message if it is an error message, nil otherwise
func checkGitHubMessage(data []byte) *GhApiMessage {
var msg *GhApiMessage
err := json.Unmarshal(data, &msg)
if err == nil && msg != nil && msg.Message != "" {
return msg
}
return nil
}
func GetFileFromGitHub(apiEndpoint string) (
err error,
msg *GhApiMessage,
data []byte,
) {
resp, err := http.Get(apiEndpoint)
if err != nil {
return err, nil, nil
}
defer tools.CloseReader(resp.Body, logger.Warn)
data, err = io.ReadAll(resp.Body)
if err != nil {
return err, nil, nil
}
// Check if the response is an error message from GitHub API
if msg := checkGitHubMessage(data); msg != nil {
return nil, msg, data
}
var item GhItem
err = json.Unmarshal(data, &item)
if err != nil {
return fmt.Errorf("%w: %w", ErrCannotDecode, err), nil, nil
}
resp, err = http.Get(item.DownloadUrl)
if err != nil {
return err, nil, nil
}
defer tools.CloseReader(resp.Body, logger.Warn)
data, err = io.ReadAll(resp.Body)
if err != nil {
return err, nil, nil
}
return nil, nil, data
}
func GetDirectoryFromGitHub(apiEndpoint string) (
err error,
msg *GhApiMessage,
items []GhItem,
) {
resp, err := http.Get(apiEndpoint)
if err != nil {
return err, nil, nil
}
defer tools.CloseReader(resp.Body, logger.Warn)
data, err := io.ReadAll(resp.Body)
if err != nil {
return err, nil, nil
}
// Check if the response is an error message from GitHub API
if msg := checkGitHubMessage(data); msg != nil {
return nil, msg, nil
}
var res []GhItem
err = json.Unmarshal(data, &res)
if err != nil {
return fmt.Errorf("%w: %w", ErrCannotDecode, err), nil, nil
}
return nil, nil, res
}
package install
import (
"errors"
"fmt"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
)
type platformInstaller func(p types.Package) error
type Result struct {
Installed []types.Package
Provenance map[string][]string
}
var installers = map[types.Platform]platformInstaller{}
func registerInstaller(platform types.Platform, installer platformInstaller) {
if installer == nil {
panic("install: nil installer")
}
installers[platform] = installer
}
func Install(id types.PackageId, source types.Source, options Options) (*Result, error) {
// for regular (non-identity) packages, delegate to InstallMany to unify
// resolver behavior with batch adds
if !id.IsIdentityPackage() {
return InstallMany([]types.PackageId{id}, source, options)
}
// identity packages go through the established platform installer
if id.Version == types.VersionAny {
id.Version = types.VersionCompatible
}
if err := installPlatform(id); err != nil {
return nil, err
}
return &Result{}, nil
}
func installPlatform(id types.PackageId) error {
id.NormalizeIdentityPackage()
err := id.IsValidIdentityPackage()
if err != nil {
return err
}
serverInfo := probe.ServerInfo()
serverPlatform := serverInfo.Runtime.DerivedModLoader()
hasMcdr := serverInfo.Environments.Mcdr != nil
errExistingPlatform := func() error {
return fmt.Errorf(
"found an existing server platform %s, installation of %s aborted",
serverPlatform.Title(),
id.Platform.Title(),
)
}
switch id.IdentityToPlatform() {
case types.PlatformMinecraft:
if serverPlatform != types.PlatformNone {
// TODO: ask if overwrite existing server
return errors.New("a server is already installed")
}
return installMinecraftServer(id)
case types.PlatformForge:
switch serverPlatform {
case types.PlatformVanilla, types.PlatformNone:
return installForge(id)
default:
return errExistingPlatform()
}
case types.PlatformFabric:
switch serverPlatform {
case types.PlatformUnknown:
return errors.New("unknown mod loader, cannot infer fabric bootstrap artifact")
case types.PlatformFabric:
return errors.New("fabric server already detected, installation aborted")
case types.PlatformForge:
return errors.New("Forge server detected, cannot install Fabric bootstrap")
case types.PlatformNeoforge:
return errors.New("NeoForge server detected, cannot install Fabric bootstrap")
case types.PlatformVanilla:
override, deleteVanilla := promptOverrideVanillaWithFabric()
if !override {
return errors.New("installation aborted by user")
}
return installFabricWithOverride(id, deleteVanilla)
case types.PlatformNone:
default:
return fmt.Errorf(
"unsupported server platform %s for fabric installation",
serverPlatform.Title(),
)
}
return installFabric(id)
case types.PlatformNeoforge:
switch serverPlatform {
case types.PlatformVanilla, types.PlatformNone:
return installNeoForge(id)
default:
return errExistingPlatform()
}
case types.PlatformMCDR:
if hasMcdr {
return errors.New("mcdr already installed")
}
return initMcdr()
default:
return fmt.Errorf("cannot install platform: %s", id.Platform)
}
}
package install
import (
"context"
"fmt"
"os"
"time"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/tui/progress"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
var fabricMetaBaseURL = "https://meta.fabricmc.net"
// Docs: https://fabricmc.net/use/server/
// Fabric install bootstraps from the server launch jar and resolves versions via Fabric Meta.
type fabricInstallerVersion struct {
Version string `json:"version"`
Stable bool `json:"stable"`
}
type fabricLoaderVersionEntry struct {
Version string `json:"version"`
Stable bool `json:"stable"`
}
func init() {
registerInstaller(types.PlatformFabric, installFabricMod)
}
func installFabric(p types.PackageId) error {
return installFabricWithOverride(p, false)
}
func installFabricWithOverride(p types.PackageId, deleteVanilla bool) error {
serverInfo := probe.ServerInfo()
var gameVersion string
switch serverInfo.Runtime.DerivedModLoader() {
case types.PlatformVanilla:
gameVersion = string(serverInfo.Runtime.GameVersion)
case types.PlatformNone:
gameVersion = promptSelectMinecraftVersionForFabric()
}
loaderVersion, err := getFabricLoaderVersion(p.Version)
if err != nil {
return fmt.Errorf("resolve fabric loader version failed: %w", err)
}
if gameVersion == "" {
gameVersion, err = getFabricGameVersion(serverInfo.Runtime.GameVersion)
if err != nil {
return fmt.Errorf("cannot install fabric for game version: %w", err)
}
}
installerVersion, err := getLatestFabricInstallerVersion()
if err != nil {
return fmt.Errorf("cannot get fabric loader version: %w", err)
}
artifactUrl := fmt.Sprintf(
"https://meta.fabricmc.net/v2/versions/loader/%s/%s/%s/server/jar",
gameVersion, loaderVersion, installerVersion,
)
tracker := progress.NewTracker("fabric")
defer func() {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = progress.WaitForShutdown(ctx)
}()
defer tracker.Close()
result, err := util.CachedDownload(
artifactUrl,
serverInfo.WorkPath,
util.DownloadOptions{
Kind: cache.KindArtifact,
WrapReader: tracker.ProxyReader,
OnCacheHit: tracker.CacheHit,
OnResolvedFilename: func(title string) { tracker.SetTitle(title) },
},
)
if result != nil {
tools.CloseReader(result.File, nil)
}
if err != nil {
return fmt.Errorf("download failed: %w", err)
}
if deleteVanilla {
err = os.Remove(serverInfo.Runtime.PrimaryEntrance)
}
probe.Rebuild()
return nil
}
func installFabricMod(p types.Package) error {
return installModLoaderPackage(p, types.PlatformFabric)
}
package install
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"time"
"charm.land/huh/v2"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/probe"
tuiprogress "github.com/mclucy/lucy/tui/progress"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
func getForgeVersionFromPackageId(
p types.PackageId,
gameVersion types.RawVersion,
) (string, error) {
if p.Version != types.VersionLatest && p.Version != types.VersionCompatible && p.Version != types.VersionAny && p.Version != types.VersionUnknown {
return p.Version.String(), nil
}
return fetchForgeVersion(gameVersion)
}
func checkJavaAvailability() error {
_, err := exec.LookPath("java")
if err != nil {
return errors.New("java not found in PATH, Forge requires Java to install")
}
return nil
}
var (
forgeDocsURL = "https://files.minecraftforge.net/"
forgePromotionsURL = "https://files.minecraftforge.net/net/minecraftforge/forge/promotions_slim.json"
forgeMavenBaseURL = "https://maven.minecraftforge.net/net/minecraftforge/forge"
// Forge/NeoForge installation differences (official docs):
// 1) Artifact naming:
// Forge: forge-{mc_version}-{forge_version}-installer.jar
// NeoForge: neoforge-{version}-installer.jar
// 2) Version metadata source:
// Forge: promotions_slim.json on files.minecraftforge.net
// NeoForge: release index from maven.neoforged.net
// 3) Installation command:
// Both use: java -jar <installer>.jar --installServer
forgeNeoForgeDiffDocURL = "https://docs.neoforged.net/user/docs/server"
)
type forgePromotions struct {
Promos map[string]string `json:"promos"`
}
func init() {
registerInstaller(types.PlatformForge, installForgeMod)
}
func installForgeMod(p types.Package) error {
return installModLoaderPackage(p, types.PlatformForge)
}
func guardServerTopologyForForgePlatform() error {
serverInfo := probe.ServerInfo()
serverPlatform := serverInfo.Runtime.DerivedModLoader()
switch serverPlatform {
case types.PlatformFabric, types.PlatformForge, types.PlatformNeoforge:
return fmt.Errorf(
"found an existing server platform %s, installation of forge aborted",
serverPlatform.Title(),
)
}
return nil
}
func promptSupportForgeProject() {
form := huh.NewForm(
huh.NewGroup(
huh.NewNote().
Title("Supporting the Forge project").
Description(
"The Forge project is sustained by ads on the download page. By automating " +
"this process, we may reduce ad revenue that supports the project. If you find " +
"Forge useful, please consider supporting the project by downloading manually " +
"from their official site <https://files.minecraftforge.net>, or support them on " +
"Patreon at <https://www.patreon.com/LexManos>",
),
),
).WithWidth(80)
_ = form.Run()
}
func promptSelectMinecraftVersionForForge() (version string) {
versions, err := fetchForgeSupportedMinecraftVersions()
if err != nil || len(versions) == 0 {
return "error"
}
gameVersions := versions
var installLatest bool
options := huh.NewOptions[string](gameVersions...)
err = huh.NewForm(
huh.NewGroup(
huh.NewConfirm().
Title("No current Minecraft installation found.").
Description("Do you want to install forge with its latest supported Minecraft version?").
Affirmative("Yes, proceed").
Negative("No, select a game version").
Value(&installLatest),
),
).Run()
if err != nil {
return "none"
}
if installLatest {
return gameVersions[len(gameVersions)-1]
}
err = huh.NewForm(
huh.NewGroup(
huh.NewSelect[string]().
Title("Select a Minecraft installation").
Options(options...).
Filtering(true).
Height(10).
Value(&version),
).WithHide(installLatest),
).Run()
if err != nil {
return "none"
}
return
}
func fetchForgeSupportedMinecraftVersions() ([]string, error) {
data, err := util.CachedGetBytes(
forgePromotionsURL,
util.BytesRequestOptions{Kind: cache.KindMetadata},
)
if err != nil {
return nil, fmt.Errorf("fetch forge promotions failed: %w", err)
}
versions, err := parseForgeSupportedMinecraftVersions(data)
if err != nil {
return nil, err
}
if len(versions) == 0 {
return nil, fmt.Errorf("forge promotions is empty; see %s", forgeDocsURL)
}
return versions, nil
}
func parseForgeSupportedMinecraftVersions(data []byte) ([]string, error) {
var payload struct {
Promos json.RawMessage `json:"promos"`
}
if err := json.Unmarshal(data, &payload); err != nil {
return nil, fmt.Errorf("parse forge promotions failed: %w", err)
}
dec := json.NewDecoder(bytes.NewReader(payload.Promos))
tok, err := dec.Token()
if err != nil {
return nil, fmt.Errorf("parse forge promotions failed: %w", err)
}
if delim, ok := tok.(json.Delim); !ok || delim != '{' {
return nil, fmt.Errorf("parse forge promotions failed: promos is not an object")
}
seen := map[string]struct{}{}
versions := make([]string, 0)
for dec.More() {
keyTok, err := dec.Token()
if err != nil {
return nil, fmt.Errorf("parse forge promotions failed: %w", err)
}
key, ok := keyTok.(string)
if !ok {
return nil, fmt.Errorf("parse forge promotions failed: invalid promos key")
}
if _, err := dec.Token(); err != nil {
return nil, fmt.Errorf("parse forge promotions failed: %w", err)
}
base, ok := strings.CutSuffix(key, "-recommended")
if !ok {
base, ok = strings.CutSuffix(key, "-latest")
}
if !ok {
continue
}
if _, exists := seen[base]; exists {
continue
}
if !strings.HasPrefix(base, "1.") {
continue
}
seen[base] = struct{}{}
versions = append(versions, base)
}
return versions, nil
}
func verifyForgeInstallation(workPath string) error {
// Check for modern Forge (1.17+): libraries/ dir + launch script
librariesPath := filepath.Join(workPath, "libraries")
if _, err := os.Stat(librariesPath); err == nil {
// libraries/ exists, check for launch scripts
launchScripts := []string{
"run.sh", "run.bat", "unix_args.txt", "win_args.txt",
}
for _, script := range launchScripts {
if _, err := os.Stat(filepath.Join(workPath, script)); err == nil {
return nil // Modern Forge verified
}
}
}
// Check for legacy Forge: forge-*-universal.jar or forge-*.jar
entries, err := os.ReadDir(workPath)
if err != nil {
return fmt.Errorf(
"verify forge installation failed: cannot read work directory: %w",
err,
)
}
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := entry.Name()
if strings.Contains(name, "forge-") && strings.HasSuffix(name, ".jar") {
return nil // Legacy Forge verified
}
}
return errors.New("forge installation verification failed: no artifacts found (expected libraries/ with launch scripts or forge-*.jar)")
}
func installForge(p types.PackageId) error {
if err := guardServerTopologyForForgePlatform(); err != nil {
return err
}
serverInfo := probe.ServerInfo()
if serverInfo.WorkPath == "" {
return errors.New("server working directory not found")
}
var gameVersion types.RawVersion
switch serverInfo.Runtime.DerivedModLoader() {
case types.PlatformVanilla:
gameVersion = serverInfo.Runtime.GameVersion
case types.PlatformNone:
selectedVersion := promptSelectMinecraftVersionForForge()
if selectedVersion == "none" || selectedVersion == "error" {
return errors.New("minecraft version selection cancelled or failed")
}
gameVersion = types.RawVersion(selectedVersion)
}
if gameVersion == types.VersionUnknown {
return fmt.Errorf(
"unknown minecraft version, cannot infer forge bootstrap artifact; see %s",
forgeDocsURL,
)
}
if err := checkJavaAvailability(); err != nil {
return err
}
if err := ensureMinecraftEULAAccepted(serverInfo.WorkPath); err != nil {
return err
}
promptSupportForgeProject()
forgeVersion, err := getForgeVersionFromPackageId(p, gameVersion)
p.Version = types.RawVersion(forgeVersion)
if err != nil {
return err
}
p.Version = types.RawVersion(forgeVersion)
fileURL := resolveForgeInstallerURL(gameVersion, forgeVersion)
if err := runModLoaderInstaller(
p,
fileURL,
serverInfo.WorkPath,
"Forge",
); err != nil {
return err
}
return verifyForgeInstallation(serverInfo.WorkPath)
}
func fetchForgeVersion(gameVersion types.RawVersion) (string, error) {
res, err := http.Get(forgePromotionsURL)
if err != nil {
return "", fmt.Errorf("fetch forge promotions failed: %w", err)
}
defer func() { _ = res.Body.Close() }()
if res.StatusCode < 200 || res.StatusCode >= 300 {
return "", fmt.Errorf(
"fetch forge promotions failed: status %d",
res.StatusCode,
)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("read forge promotions failed: %w", err)
}
var data forgePromotions
if err := json.Unmarshal(body, &data); err != nil {
return "", fmt.Errorf("parse forge promotions failed: %w", err)
}
if len(data.Promos) == 0 {
return "", fmt.Errorf("forge promotions is empty; see %s", forgeDocsURL)
}
keyBase := gameVersion.String()
if v := data.Promos[keyBase+"-recommended"]; v != "" {
return v, nil
}
if v := data.Promos[keyBase+"-latest"]; v != "" {
return v, nil
}
return "", fmt.Errorf(
"no forge version found for minecraft %s in promotions data; see %s (Forge) and %s (NeoForge comparison)",
gameVersion,
forgeDocsURL,
forgeNeoForgeDiffDocURL,
)
}
func resolveForgeInstallerURL(
gameVersion types.RawVersion,
forgeVersion string,
) string {
combinedVersion := fmt.Sprintf("%s-%s", gameVersion.String(), forgeVersion)
escaped := url.PathEscape(combinedVersion)
return fmt.Sprintf(
"%s/%s/forge-%s-installer.jar",
forgeMavenBaseURL,
escaped,
escaped,
)
}
// forgeStage represents a phase of the Forge installation process.
type forgeStage struct {
name string
floor float64 // start of stage window [0, 1]
span float64 // width of stage window [0, 1]
}
// forgeStages defines the ordered installation phases with hardcoded progress windows.
// Based on observed Forge installer output patterns:
// 0.00-0.08: Initialization (JVM info, directory setup)
// 0.08-0.20: Extraction (main jar extraction)
// 0.20-0.60: Libraries (bulk of work - downloading/validating dependencies)
// 0.60-0.95: Processors (post-processing, server jar generation)
// 0.95-1.00: Verification (final checks, reprobe)
var forgeStages = []forgeStage{
{name: "init", floor: 0.00, span: 0.02},
{name: "libraries", floor: 0.02, span: 0.08},
{name: "extract", floor: 0.10, span: 0.60},
{name: "writing", floor: 0.70, span: 0.2},
{name: "checksum", floor: 0.72, span: 0.03},
{name: "processing", floor: 0.75, span: 0.22},
{name: "completion", floor: 0.97, span: 0},
}
// forgeLogTail holds a bounded buffer of recent installer output lines.
type forgeLogTail struct {
lines []string
max int
}
func newForgeLogTail(maxLines int) *forgeLogTail {
return &forgeLogTail{lines: make([]string, 0, maxLines), max: maxLines}
}
func (t *forgeLogTail) append(line string) {
t.lines = append(t.lines, line)
if len(t.lines) > t.max {
t.lines = t.lines[1:]
}
}
func (t *forgeLogTail) String() string {
return strings.Join(t.lines, "\n")
}
// classifyForgeLine maps a log line to a stage index and returns whether it's a strong marker.
// Strong markers (true) advance the active stage; weak markers (false) only contribute to intra-stage progress.
func classifyForgeLine(line string) (stageIdx int, isStrong bool) {
lower := strings.ToLower(line)
// init stage
if strings.Contains(lower, "jvm info") ||
strings.Contains(lower, "current time") ||
strings.Contains(lower, "target directory") {
return 0, true
}
// libraries stage
if strings.Contains(lower, "considering library") ||
strings.Contains(lower, "downloading library") {
return 1, false
}
if strings.Contains(lower, "downloading libraries") {
return 1, true
}
// build & extract libraries stage
if strings.Contains(lower, "building processors") {
return 2, true
}
if strings.Contains(lower, "extracted") ||
strings.Contains(lower, "output") {
return 2, false
}
// writing stage
if strings.Contains(lower, "writing output:") {
return 3, true
}
// checksum stage
if strings.Contains(lower, "loading patches file:") {
return 4, true
}
if strings.Contains(lower, "reading patch") ||
strings.Contains(lower, "checksum") {
return 4, false
}
// processing stage
if strings.Contains(lower, "processing:") {
return 5, true
}
if strings.Contains(lower, "copying") ||
strings.Contains(lower, "patching") {
return 5, false
}
// completion stage marker
if strings.Contains(lower, "The server installed successfully") {
return 6, true
}
// Default: stay in current stage, weak marker
return -1, false
}
// forgeAsymptoticProgress computes intra-stage progress using an asymptotic function.
// score: cumulative line count within the stage (0+)
// floor, span: stage window boundaries
// Returns a value in [floor, floor+span) that approaches floor+span asymptotically.
func forgeAsymptoticProgress(x float64, floor, span float64) float64 {
const k = math.Ln10 * math.Ln2 * 4 // steepness of asymptotic curve
// progress = floor + span * (1 - exp(-k * x))
// As x → ∞, progress → floor + span
progress := floor + span*math.Tanh(math.Log(x+1)/k)
// Clamp to stage window to prevent overshoot
if progress > floor+span {
progress = floor + span
}
return progress
}
func runForgeInstaller(
installerPath string,
tracker *tuiprogress.Tracker,
) error {
installerName := path.Base(installerPath)
cmd := exec.Command("java", "-jar", installerName, "--installServer")
cmd.Dir = path.Dir(installerPath)
stdout, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("create stdout pipe failed: %w", err)
}
stderr, err := cmd.StderrPipe()
if err != nil {
return fmt.Errorf("create stderr pipe failed: %w", err)
}
merged := io.MultiReader(stdout, stderr)
scanner := bufio.NewScanner(merged)
if err := cmd.Start(); err != nil {
return fmt.Errorf("start installer failed: %w", err)
}
logWriter := tracker.LogWriter()
tail := newForgeLogTail(50)
activeStageIdx := 0
stageScores := make([]float64, len(forgeStages))
var failurePhrase string
for scanner.Scan() {
line := scanner.Text()
_, _ = fmt.Fprintln(logWriter, line)
tail.append(line)
// Detect explicit failure phrases
lower := strings.ToLower(line)
if failurePhrase == "" {
if strings.Contains(
lower,
"there was an error during installation",
) {
failurePhrase = "There was an error during installation"
} else if strings.Contains(lower, "processor failed") {
failurePhrase = "Processor failed"
} else if strings.Contains(lower, "missing jar for processor") {
failurePhrase = "Missing Jar for processor"
}
}
stageIdx, isStrong := classifyForgeLine(line)
if stageIdx >= 0 && stageIdx < len(forgeStages) &&
isStrong && stageIdx > activeStageIdx {
activeStageIdx = stageIdx
}
if activeStageIdx < len(forgeStages) {
stageScores[activeStageIdx]++
stage := forgeStages[activeStageIdx]
progress := forgeAsymptoticProgress(
stageScores[activeStageIdx],
stage.floor,
stage.span,
)
tracker.SetPercent(progress)
}
}
if err := scanner.Err(); err != nil {
return fmt.Errorf(
"read installer output failed: %w\nRecent output:\n%s",
err,
tail.String(),
)
}
if err := cmd.Wait(); err != nil {
if failurePhrase != "" {
return fmt.Errorf(
"run forge installer failed: %s\nRecent output:\n%s",
failurePhrase,
tail.String(),
)
}
return fmt.Errorf(
"run forge installer failed: %w\nRecent output:\n%s",
err,
tail.String(),
)
}
return nil
}
// runModLoaderInstaller is the shared execution skeleton for mod loader platform
// installers (Forge, NeoForge). It downloads the installer JAR to workPath,
// runs java -jar <installer> --installServer with progress tracking, and calls
// probe.Rebuild on success.
//
// platformName is used for user-facing progress labels (e.g. "Forge", "NeoForge").
func runModLoaderInstaller(
id types.PackageId,
fileURL string,
workPath string,
platformName string,
) error {
tracker := tuiprogress.NewTrackerWithLogging(id.StringFull(), 5)
defer func() {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = tuiprogress.WaitForShutdown(ctx)
}()
defer tracker.Close()
result, err := util.CachedDownload(
fileURL,
workPath,
util.DownloadOptions{
Kind: cache.KindArtifact,
WrapReader: tracker.ProxyReader,
OnCacheHit: tracker.CacheHit,
OnResolvedFilename: func(title string) { tracker.SetTitle(title) },
FileMode: 0o750,
},
)
if err != nil {
return fmt.Errorf("download failed: %w", err)
}
if result == nil {
return errors.New("download result is nil")
}
defer func() { _ = result.File.Close() }()
installerPath := result.File.Name()
if err := runForgeInstaller(installerPath, tracker); err != nil {
return err
}
tracker.SetPercent(0.99)
probe.Rebuild()
tracker.Complete(platformName + " installed")
return nil
}
package install
import (
"errors"
"fmt"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
func init() {
registerInstaller(types.PlatformAny, installGenericPackage)
}
func installGenericPackage(p types.Package) error {
if p.Remote == nil {
return errors.New("package remote data is missing")
}
showDownloadStart(p.Remote.FileUrl)
result, err := util.CachedDownload(p.Remote.FileUrl, ".", util.DownloadOptions{
Kind: cache.KindArtifact,
Filename: p.Remote.Filename,
ExpectedHash: p.Remote.Hash,
HashAlgorithm: cache.ParseHashAlgorithm(p.Remote.HashAlgorithm),
})
if err != nil {
return fmt.Errorf("download failed: %w", err)
}
defer result.File.Close()
showInstallComplete(result.File.Name())
return nil
}
package install
import (
"errors"
"fmt"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
)
func ensureServerPlatformMatch(id types.PackageId) error {
platform := id.Platform
serverInfo := probe.ServerInfo()
switch platform {
case types.PlatformAny:
return nil
case types.PlatformMCDR:
if serverInfo.Environments.Mcdr == nil {
return errors.New("mcdr not found")
}
return nil
default:
if !serverInfo.Runtime.IsValid() {
return errors.New("no valid executable found, `lucy add` requires a server in current directory")
}
requiredCapability := probe.CapabilityForPlatform(platform)
if requiredCapability == "" {
return nil
}
topology := serverInfo.Runtime.Topology
result := probe.EvaluateCompatibility(topology, requiredCapability)
switch result.Verdict {
case types.CompatCompatible:
return nil
case types.CompatDegraded:
// CompatDegraded means the ecosystem is reachable only through an indirect
// hosted/support path. It is warn-only here; numeric risk gating is node-based.
logger.ShowWarn(fmt.Errorf(
"compatibility degraded for %s: %s (reason: %s)",
platform,
result.Detail,
result.Reason,
))
return nil
case types.CompatUnresolved:
return fmt.Errorf(
"topology unresolved for %s: cannot determine server compatibility",
platform.Title(),
)
case types.CompatIncompatible:
return fmt.Errorf(
"%s packages are incompatible with the current runtime (reason: %s, verdict: %s)",
platform.Title(),
result.Reason,
result.Verdict,
)
default:
return fmt.Errorf(
"%s runtime compatibility could not be confirmed (reason: %s, verdict: %s)",
platform.Title(),
result.Reason,
result.Verdict,
)
}
}
}
package install
import (
"fmt"
"slices"
"github.com/mclucy/lucy/types"
)
// sortIdentityPackages sorts identity packages by platform dependency tier.
// Tier 0: Minecraft (base platform)
// Tier 1: Fabric, Forge, NeoForge (mutually exclusive modloaders)
// Tier 2: MCDR (can coexist with anything)
// Within the same tier, input order is preserved.
// Duplicates (same platform) are deduplicated, keeping the first occurrence.
func sortIdentityPackages(ids []types.PackageId) []types.PackageId {
// Deduplicate by platform: keep first occurrence of each platform
seen := make(map[types.Platform]bool)
deduped := make([]types.PackageId, 0, len(ids))
for _, id := range ids {
platform := id.IdentityToPlatform()
if !seen[platform] {
seen[platform] = true
deduped = append(deduped, id)
}
}
// Sort by tier
slices.SortStableFunc(deduped, func(a, b types.PackageId) int {
tierA := getTier(a.IdentityToPlatform())
tierB := getTier(b.IdentityToPlatform())
if tierA < tierB {
return -1
}
if tierA > tierB {
return 1
}
return 0
})
return deduped
}
// validateIdentityCompatibility validates that no two incompatible identity packages exist.
// Incompatibility rule: only one tier-1 platform (Fabric, Forge, NeoForge) is allowed.
// Returns nil if valid, or an error describing the conflict.
func validateIdentityCompatibility(ids []types.PackageId) error {
tier1Platforms := make([]types.PackageId, 0)
for _, id := range ids {
platform := id.IdentityToPlatform()
tier := getTier(platform)
if tier == 1 {
tier1Platforms = append(tier1Platforms, id)
}
}
if len(tier1Platforms) > 1 {
// Build error message with conflicting platform names
names := make([]string, len(tier1Platforms))
for i, id := range tier1Platforms {
names[i] = string(id.Name)
}
return fmt.Errorf("incompatible identity packages: %v (only one modloader allowed)", names)
}
return nil
}
// getTier returns the dependency tier for a platform.
func getTier(platform types.Platform) int {
switch platform {
case types.PlatformMinecraft:
return 0
case types.PlatformFabric, types.PlatformForge, types.PlatformNeoforge:
return 1
case types.PlatformMCDR:
return 2
default:
return 3 // unknown platforms go last
}
}
package install
import (
"fmt"
"strings"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream/routing"
)
func InstallMany(ids []types.PackageId, source types.Source, options Options) (*Result, error) {
const maxReconcileIterations = 3
if len(ids) == 0 {
return &Result{}, nil
}
prepared := prepareBatchIDs(ids)
identityIds, regularIds := partitionBatchIDs(prepared)
if err := validateIdentityCompatibility(identityIds); err != nil {
return nil, err
}
identityIds = sortIdentityPackages(identityIds)
if len(identityIds) > 0 {
showBatchPhase("Installing platforms", identityIds)
succeeded := make([]string, 0, len(identityIds))
for _, id := range identityIds {
if err := installPlatform(id); err != nil {
if len(succeeded) > 0 {
return nil, fmt.Errorf(
"%s: failed to install %s (already installed: %s)",
err,
id.StringFull(),
strings.Join(succeeded, ", "),
)
}
return nil, fmt.Errorf("failed to install %s: %w", id.StringFull(), err)
}
succeeded = append(succeeded, id.StringFull())
}
probe.InvalidateServerInfo()
}
if len(regularIds) == 0 {
showBatchSummary(len(identityIds), 0)
return &Result{}, nil
}
showBatchPhase("Fetching metadata for", regularIds)
if err := validateRegularBatchIDs(regularIds); err != nil {
return nil, err
}
serverInfo := probe.ServerInfo()
providers, err := routing.ResolveProvidersFromTopology(
serverInfo.Runtime.Topology,
source,
)
if err != nil {
return nil, err
}
if serverInfo.Environments.Mcdr != nil {
mcdrProviders, err := routing.ResolveProviders(
types.PlatformMCDR,
types.SourceAuto,
)
if err != nil {
logger.ShowInfo(
fmt.Errorf("failed to resolve MCDR provider: %w", err),
)
} else {
providers = append(providers, mcdrProviders...)
}
}
roots := append([]types.PackageId(nil), regularIds...)
if serverLoader := serverInfo.Runtime.DerivedModLoader(); serverLoader != types.PlatformAny {
for i, id := range roots {
if id.Platform == types.PlatformAny {
roots[i].Platform = serverLoader
}
}
}
seedTx := NewRecursiveTransaction(roots, providers)
SnapshotInstalledConstraints(seedTx)
resolvePlan := newRecursiveResolutionPlan(roots, seedTx.InstalledConstraints)
var tx *RecursiveTransaction
var diff ReconcileDiff
for iteration := range maxReconcileIterations {
showRecursiveResolveStart(resolvePlan.Roots)
tx, err = BuildCandidateGraph(
resolvePlan.Roots,
providers,
resolvePlan.InstalledConstraints,
options,
)
if err != nil {
showRecursiveConflict(err)
return nil, err
}
pruneRecursiveCandidates(tx, resolvePlan.ExcludedCandidates)
packages := recursiveCandidatePackages(tx)
showRecursiveDownloadStart(len(packages))
tx.StagingDir, packages, err = downloadBatchPackages(serverInfo.WorkPath, packages)
if err != nil {
return nil, err
}
backfillRecursiveDownloads(tx, packages)
tx.AdvanceTo(PhaseDownloaded)
showRecursiveVerifyStart(len(tx.DownloadedArtifacts))
if err := VerifyDownloadedArtifacts(tx); err != nil {
return nil, err
}
diff, err = ReconcileTransaction(tx)
if err != nil {
showRecursiveConflict(err)
return nil, err
}
if diff.IsStable() {
break
}
if iteration == maxReconcileIterations-1 {
return nil, fmt.Errorf(
"install: recursive closure did not stabilize after %d iterations: %s",
maxReconcileIterations,
summarizeReconcileDiff(diff),
)
}
resolvePlan = refineRecursiveResolutionPlan(resolvePlan, diff)
}
plan, err := BuildRecursiveApplyPlan(tx)
if err != nil {
return nil, err
}
tx.SetApplyPlan(plan)
tx.AdvanceTo(PhaseCommitted)
if err := ApplyValidatedClosure(tx, serverInfo); err != nil {
return nil, err
}
return buildInstallResult(tx), nil
}
func buildInstallResult(tx *RecursiveTransaction) *Result {
if tx == nil || tx.Apply == nil {
return &Result{}
}
installed := append([]types.Package(nil), tx.Apply.Install...)
provenance := make(map[string][]string, len(tx.CandidateGraph))
for key, node := range tx.CandidateGraph {
provenance[key] = append([]string(nil), node.ProvenancePath...)
}
return &Result{Installed: installed, Provenance: provenance}
}
func prepareBatchIDs(ids []types.PackageId) []types.PackageId {
seen := make(map[string]struct{}, len(ids))
prepared := make([]types.PackageId, 0, len(ids))
for _, id := range ids {
if id.Version == types.VersionAny {
id.Version = types.VersionCompatible
}
if id.IsIdentityPackage() {
id.NormalizeIdentityPackage()
}
key := id.StringPlatformName()
if _, ok := seen[key]; ok {
continue
}
seen[key] = struct{}{}
prepared = append(prepared, id)
}
return prepared
}
func partitionBatchIDs(ids []types.PackageId) ([]types.PackageId, []types.PackageId) {
identityIds := make([]types.PackageId, 0, len(ids))
regularIds := make([]types.PackageId, 0, len(ids))
for _, id := range ids {
if id.IsIdentityPackage() {
identityIds = append(identityIds, id)
continue
}
regularIds = append(regularIds, id)
}
return identityIds, regularIds
}
func validateRegularBatchIDs(ids []types.PackageId) error {
failures := make([]string, 0)
for _, id := range ids {
if err := ensureServerPlatformMatch(id); err != nil {
failures = append(failures, fmt.Sprintf("%s: %v", id.StringFull(), err))
}
}
if len(failures) == 0 {
return nil
}
return fmt.Errorf(
"server compatibility check failed: %s",
strings.Join(failures, "; "),
)
}
package install
import (
"errors"
"fmt"
"os"
"os/exec"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
func init() {
registerInstaller(types.PlatformMCDR, installMcdrPlugin)
}
func installMcdrPlugin(p types.Package) error {
if p.Id.Platform != types.PlatformMCDR {
return fmt.Errorf("unsupported platform: %s", p.Id.Platform)
}
if p.Remote == nil {
return errors.New("package remote data is missing")
}
serverInfo := probe.ServerInfo()
if serverInfo.Environments.Mcdr == nil {
return errors.New("mcdr not found")
}
pluginDirectories := serverInfo.Environments.Mcdr.Config.PluginDirectories
if len(pluginDirectories) == 0 {
return errors.New("mcdr plugin directory not found")
}
if err := os.MkdirAll(pluginDirectories[0], 0o755); err != nil {
return fmt.Errorf("create plugin directory failed: %w", err)
}
showDownloadStart(p.Remote.FileUrl)
result, err := util.CachedDownload(p.Remote.FileUrl, pluginDirectories[0], util.DownloadOptions{
Kind: cache.KindArtifact,
Filename: p.Remote.Filename,
ExpectedHash: p.Remote.Hash,
HashAlgorithm: cache.ParseHashAlgorithm(p.Remote.HashAlgorithm),
})
if err != nil {
return fmt.Errorf("download failed: %w", err)
}
defer result.File.Close()
showInstallComplete(result.File.Name())
return nil
}
func initMcdr() error {
err := exec.Command(
"mcdreforged",
"--version",
).Run() // check if mcdreforged is in PATH
if err != nil {
return err
}
// make subdir
err = os.Mkdir("server", 0o755)
if err != nil {
return err
}
// move everything to subdir
files, err := os.ReadDir(".")
if err != nil {
return err
}
for _, file := range files {
if file.Name() == "server" {
continue
}
err = os.Rename(file.Name(), "server/"+file.Name())
if err != nil {
return err
}
}
// init mcdr
err = exec.Command(
"mcdreforged",
"init",
).Run()
if err != nil {
return err
}
// rebuild server info
probe.Rebuild()
return nil
}
package install
import (
"errors"
"fmt"
"os"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
// installModLoaderPackage is a unified function to handle the installation of mods
// since most mod loaders has the same mod loading process
func installModLoaderPackage(p types.Package, platform types.Platform) error {
if p.Id.Platform != platform {
return fmt.Errorf("unsupported platform: %s", p.Id.Platform)
}
if p.Remote == nil {
return errors.New("package remote data is missing")
}
serverInfo := probe.ServerInfo()
if len(serverInfo.ModPath) == 0 {
return errors.New("mod directory not found")
}
if err := os.MkdirAll(serverInfo.ModPath[0], 0o755); err != nil {
return fmt.Errorf("create mod directory failed: %w", err)
}
showDownloadStart(p.Remote.FileUrl)
result, err := util.CachedDownload(
p.Remote.FileUrl,
serverInfo.ModPath[0],
util.DownloadOptions{
Kind: cache.KindArtifact,
Filename: p.Remote.Filename,
ExpectedHash: p.Remote.Hash,
HashAlgorithm: cache.ParseHashAlgorithm(p.Remote.HashAlgorithm),
},
)
if err != nil {
return fmt.Errorf("download failed: %w", err)
}
defer result.File.Close()
showInstallComplete(result.File.Name())
return nil
}
package install
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"charm.land/huh/v2"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
)
func init() {
registerInstaller(types.PlatformNeoforge, installNeoForgeMod)
}
func installNeoForgeMod(p types.Package) error {
return installModLoaderPackage(p, types.PlatformNeoforge)
}
// guardServerTopologyForNeoForgePlatform returns an error if an incompatible
// mod loader is already installed.
func guardServerTopologyForNeoForgePlatform() error {
serverInfo := probe.ServerInfo()
serverPlatform := serverInfo.Runtime.DerivedModLoader()
switch serverPlatform {
case types.PlatformFabric, types.PlatformForge, types.PlatformNeoforge:
return fmt.Errorf(
"found an existing server platform %s, installation of NeoForge aborted",
serverPlatform.Title(),
)
}
return nil
}
var (
neoForgeMavenBaseURL = "https://maven.neoforged.net/releases/net/neoforged/neoforge"
neoForgeMetadataURL = "https://maven.neoforged.net/releases/net/neoforged/neoforge/maven-metadata.xml"
neoForgeDocsURL = "https://docs.neoforged.net/user/docs/server/"
)
type neoForgeMavenMetadata struct {
Versioning struct {
Latest string `xml:"latest"`
Release string `xml:"release"`
Versions []string `xml:"versions>version"`
} `xml:"versioning"`
}
func installNeoForge(id types.PackageId) error {
if err := guardServerTopologyForNeoForgePlatform(); err != nil {
return err
}
serverInfo := probe.ServerInfo()
if serverInfo.WorkPath == "" {
return errors.New("server working directory not found")
}
var gameVersion types.RawVersion
switch serverInfo.Runtime.DerivedModLoader() {
case types.PlatformVanilla:
gameVersion = serverInfo.Runtime.GameVersion
case types.PlatformNone:
selectedVersion := promptSelectMinecraftVersionForNeoForge()
if selectedVersion == "none" || selectedVersion == "error" {
return errors.New("minecraft version selection cancelled or failed")
}
gameVersion = types.RawVersion(selectedVersion)
}
if gameVersion == types.VersionUnknown {
return fmt.Errorf(
"unknown minecraft version, cannot infer NeoForge bootstrap artifact; see %s",
neoForgeDocsURL,
)
}
if err := checkJavaAvailability(); err != nil {
return err
}
if err := ensureMinecraftEULAAccepted(serverInfo.WorkPath); err != nil {
return err
}
neoForgeVersion, err := getNeoForgeVersionFromPackageId(id, gameVersion)
if err != nil {
return err
}
id.Version = types.RawVersion(neoForgeVersion)
fileURL := resolveNeoForgeInstallerURL(neoForgeVersion)
if err := runModLoaderInstaller(
id,
fileURL,
serverInfo.WorkPath,
"NeoForge",
); err != nil {
return err
}
return verifyNeoForgeInstallation(serverInfo.WorkPath)
}
// getNeoForgeVersionFromPackageId resolves the NeoForge version to install.
// If the version is explicit, it is returned as-is.
// Otherwise, the latest compatible version for the given Minecraft game version is fetched.
func getNeoForgeVersionFromPackageId(
p types.PackageId,
gameVersion types.RawVersion,
) (string, error) {
if p.Version != types.VersionLatest &&
p.Version != types.VersionCompatible &&
p.Version != types.VersionAny &&
p.Version != types.VersionUnknown {
return p.Version.String(), nil
}
return fetchLatestNeoForgeVersion(gameVersion)
}
// fetchLatestNeoForgeVersion fetches the latest NeoForge version compatible with
// the given Minecraft game version from the NeoForged Maven metadata.
//
// NeoForge version scheme: MAJOR.MINOR.PATCH where MAJOR = MC minor version,
// MINOR = MC patch version. E.g. NeoForge 21.4.x is for Minecraft 1.21.4.
func fetchLatestNeoForgeVersion(gameVersion types.RawVersion) (string, error) {
res, err := http.Get(neoForgeMetadataURL)
if err != nil {
return "", fmt.Errorf("fetch NeoForge metadata failed: %w", err)
}
defer func() { _ = res.Body.Close() }()
if res.StatusCode < 200 || res.StatusCode >= 300 {
return "", fmt.Errorf(
"fetch NeoForge metadata failed: status %d",
res.StatusCode,
)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("read NeoForge metadata failed: %w", err)
}
var meta neoForgeMavenMetadata
if err := xml.Unmarshal(body, &meta); err != nil {
return "", fmt.Errorf("parse NeoForge metadata failed: %w", err)
}
// NeoForge version prefix derived from MC version: "1.21.4" -> "21.4."
mcStr := gameVersion.String()
parts := strings.SplitN(mcStr, ".", 3)
if len(parts) < 2 {
return "", fmt.Errorf(
"cannot derive NeoForge version prefix from Minecraft version %s",
gameVersion,
)
}
// Drop the leading "1." from MC version to get NeoForge major.minor prefix
neoPrefix := strings.Join(parts[1:], ".") + "."
// Walk versions in reverse to find the latest matching version
versions := meta.Versioning.Versions
for i := len(versions) - 1; i >= 0; i-- {
v := versions[i]
if strings.HasPrefix(v, neoPrefix) {
return v, nil
}
}
return "", fmt.Errorf(
"no NeoForge version found for Minecraft %s (looked for prefix %s); see %s",
gameVersion,
neoPrefix,
neoForgeDocsURL,
)
}
// resolveNeoForgeInstallerURL builds the full Maven URL for a NeoForge installer JAR.
// Pattern: {mavenBase}/{version}/neoforge-{version}-installer.jar
func resolveNeoForgeInstallerURL(neoForgeVersion string) string {
return fmt.Sprintf(
"%s/%s/neoforge-%s-installer.jar",
neoForgeMavenBaseURL,
neoForgeVersion,
neoForgeVersion,
)
}
// verifyNeoForgeInstallation checks that the NeoForge installer produced the
// expected server artifacts in workPath.
//
// NeoForge generates: run.sh / run.bat, user_jvm_args.txt, libraries/net/neoforged/
func verifyNeoForgeInstallation(workPath string) error {
// Check for launch scripts
launchScripts := []string{"run.sh", "run.bat"}
for _, script := range launchScripts {
if _, err := os.Stat(filepath.Join(workPath, script)); err == nil {
return nil
}
}
// Fallback: check for NeoForge libraries directory
neoLibPath := filepath.Join(workPath, "libraries", "net", "neoforged")
if _, err := os.Stat(neoLibPath); err == nil {
return nil
}
return errors.New(
"NeoForge installation verification failed: no artifacts found " +
"(expected run.sh/run.bat or libraries/net/neoforged/)",
)
}
// promptSelectMinecraftVersionForNeoForge prompts the user to select a Minecraft
// version when no server executable is present.
func promptSelectMinecraftVersionForNeoForge() (version string) {
manifest, err := fetchMojangVersionManifest()
if err != nil || len(manifest.Versions) == 0 {
return "error"
}
gameVersions := make([]string, 0, 20)
for i := 0; i < len(manifest.Versions) && len(gameVersions) < 20; i++ {
if manifest.Versions[i].Type == "release" {
gameVersions = append(gameVersions, manifest.Versions[i].Id)
}
}
var installLatest bool
options := huh.NewOptions(gameVersions...)
err = huh.NewForm(
huh.NewGroup(
huh.NewConfirm().
Title("No current Minecraft installation found.").
Description("Do you want to install NeoForge with its latest supported Minecraft version?").
Affirmative("Yes, proceed").
Negative("No, select a game version").
Value(&installLatest),
),
).Run()
if err != nil {
return "none"
}
if installLatest {
return gameVersions[0]
}
err = huh.NewForm(
huh.NewGroup(
huh.NewSelect[string]().
Title("Select a Minecraft version for NeoForge").
Options(options...).
Filtering(true).
Height(10).
Value(&version),
).WithHide(installLatest),
).Run()
if err != nil {
return "none"
}
return
}
package install
type Options struct {
WithOptional bool
}
func DefaultOptions() Options {
return Options{}
}
package install
import (
"fmt"
"strings"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/types"
)
func showDownloadStart(url string) {
logger.ShowInfo(fmt.Sprintf("downloading from %s", url))
}
func showInstallComplete(path string) {
logger.ShowInfo(fmt.Sprintf("installed package to %s", path))
}
func showBatchPhase(header string, ids []types.PackageId) {
logger.ShowInfo(fmt.Sprintf("==> %s: %s", header, joinPackageNames(ids)))
}
func showBatchSummary(installed int, failed int) {
if failed == 0 {
logger.ShowInfo(fmt.Sprintf("%d packages installed", installed))
} else {
logger.ShowInfo(fmt.Sprintf("%d installed, %d failed", installed, failed))
}
}
func joinPackageNames(ids []types.PackageId) string {
if len(ids) == 0 {
return ""
}
if len(ids) == 1 {
return ids[0].StringFull()
}
if len(ids) == 2 {
return ids[0].StringFull() + " and " + ids[1].StringFull()
}
parts := make([]string, 0, len(ids))
for i := 0; i < len(ids)-1; i++ {
parts = append(parts, ids[i].StringFull())
}
return strings.Join(parts, ", ") + ", and " + ids[len(ids)-1].StringFull()
}
func showRecursiveResolveStart(roots []types.PackageId) {
logger.ShowInfo(fmt.Sprintf("resolving dependencies for %s", joinPackageNames(roots)))
}
func showRecursiveDownloadStart(count int) {
logger.ShowInfo(fmt.Sprintf("downloading %d artifacts", count))
}
func showRecursiveVerifyStart(count int) {
logger.ShowInfo(fmt.Sprintf("verifying %d artifacts locally", count))
}
func showRecursiveReconcileStart() {
logger.ShowInfo("reconciling advisory and verified graphs")
}
func showRecursiveReconcileDiff(diff ReconcileDiff) {
verbals := []string{}
if len(diff.Missing) > 0 {
verbals = append(verbals, fmt.Sprintf("+%d missing", len(diff.Missing)))
}
if len(diff.Extra) > 0 {
verbals = append(verbals, fmt.Sprintf("-%d extra", len(diff.Extra)))
}
if len(diff.Tightened) > 0 {
verbals = append(verbals, fmt.Sprintf("~%d tightened", len(diff.Tightened)))
}
logger.ShowInfo("reconcile: " + joinStrings(verbals))
}
func showRecursiveApplyStart(count int) {
logger.ShowInfo(fmt.Sprintf("applying %d changes", count))
}
func showRecursiveConflict(err error) {
logger.ShowInfo(fmt.Sprintf("conflict:\n%s", err.Error()))
}
func joinStrings(strs []string) string {
if len(strs) == 0 {
return "none"
}
return strings.Join(strs, ", ")
}
package install
import (
"encoding/json"
"errors"
"fmt"
"time"
"charm.land/huh/v2"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
func getFabricLoaderVersion(loaderVersion types.RawVersion) (string, error) {
if loaderVersion == types.VersionUnknown {
return "", errors.New("unknown game version, cannot resolve fabric loader version")
}
versions, err := fetchFabricLoaderVersions()
if err != nil {
return "", err
}
if loaderVersion == types.VersionLatest || loaderVersion == types.VersionCompatible || loaderVersion == types.VersionAny {
if len(versions) == 0 {
return "", errors.New("no fabric loader versions available")
}
return versions[0].Version, nil
}
for _, v := range versions {
if v.Version == loaderVersion.String() {
return v.Version, nil
}
}
return "", fmt.Errorf(
"fabric loader version %s not found",
loaderVersion.String(),
)
}
func getFabricGameVersion(gameVersion types.RawVersion) (string, error) {
if gameVersion == types.VersionUnknown {
return "", errors.New("unknown game version, cannot resolve fabric game version")
}
versions, err := fetchFabricGameVersions()
if err != nil {
return "", err
}
if gameVersion == types.VersionLatest || gameVersion == types.VersionCompatible || gameVersion == types.VersionAny {
if len(versions) == 0 {
return "", errors.New("no fabric game versions available")
}
return versions[0].Version, nil
}
for _, v := range versions {
if v.Version == gameVersion.String() {
return v.Version, nil
}
}
return "", fmt.Errorf(
"fabric game version %s not found",
gameVersion.String(),
)
}
func getLatestFabricInstallerVersion() (string, error) {
versions, err := fetchFabricInstallerVersions()
if err != nil {
return "", err
}
if len(versions) == 0 {
return "", errors.New("no fabric installer versions found")
}
return versions[0].Version, nil
}
func fetchFabricLoaderVersions() (
loaderVersions []fabricLoaderVersionEntry,
err error,
) {
err = fetchFabricVersionsMeta("loader", &loaderVersions)
return
}
func fetchFabricGameVersions() (
gameVersions []fabricInstallerVersion,
err error,
) {
err = fetchFabricVersionsMeta("game", &gameVersions)
return
}
func fetchFabricInstallerVersions() (
installerVersions []fabricInstallerVersion,
err error,
) {
err = fetchFabricVersionsMeta("installer", &installerVersions)
return
}
func fetchFabricVersionsMeta(endpoint string, target any) (err error) {
apiEndpoint := fabricMetaBaseURL + "/v2/versions/" + endpoint
data, err := util.CachedGetBytes(
apiEndpoint,
util.BytesRequestOptions{
Kind: cache.KindMetadata,
TTL: 3 * 24 * time.Hour,
},
)
if err != nil {
return fmt.Errorf(
"fetch fabric %s versions meta failed: %w",
endpoint, err,
)
}
err = json.Unmarshal(data, target)
if err != nil {
return fmt.Errorf(
"parse fabric %s versions meta failed: %w",
endpoint, err,
)
}
return
}
func promptOverrideVanillaWithFabric() (override bool, deleteVanilla bool) {
path := probe.ServerInfo().Runtime.PrimaryEntrance
version := probe.ServerInfo().Runtime.GameVersion
form := huh.NewForm(
huh.NewGroup(
huh.NewConfirm().
Title("Vanilla server detected, override it with a corresponding fabric server?").
Description(
fmt.Sprintf(
"Found server at %s, with game version %s",
path, version,
),
).
Value(&override),
),
huh.NewGroup(
huh.NewConfirm().
Title("Delete vanilla server after fabric installation?").
Description(fmt.Sprintf("Will delete %s", path)).
Value(&deleteVanilla),
).WithHideFunc(func() bool { return !override }),
)
_ = form.Run()
return
}
func promptSelectMinecraftVersionForFabric() (version string) {
versions, err := fetchFabricGameVersions()
if err != nil || len(versions) == 0 {
return "error"
}
gameVersions := make([]string, len(versions))
for i, v := range versions {
gameVersions[i] = v.Version
}
var installLatest bool
options := huh.NewOptions[string](gameVersions...)
err = huh.NewForm(
huh.NewGroup(
huh.NewConfirm().
Title("No current Minecraft installation found.").
Description("Do you want to install fabric with its latest supported Minecraft version?").
Affirmative("Yes, proceed").
Negative("No, select a game version").
Value(&installLatest),
),
).Run()
if err != nil {
return "none"
}
if installLatest {
return gameVersions[0]
}
err = huh.NewForm(
huh.NewGroup(
huh.NewSelect[string]().
Title("Select a Minecraft installation").
Options(options...).
Filtering(true).
Height(10).
Value(&version),
).WithHide(installLatest),
).Run()
if err != nil {
return "none"
}
return
}
package install
import (
"errors"
"fmt"
"os"
"path/filepath"
"slices"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
)
func recursiveInstallDestination(
serverInfo types.ServerInfo,
pkg types.Package,
) string {
if pkg.Id.Platform.IsModding() && len(serverInfo.ModPath) > 0 {
return serverInfo.ModPath[0]
}
if pkg.Id.Platform == types.PlatformMCDR &&
serverInfo.Environments.Mcdr != nil &&
len(serverInfo.Environments.Mcdr.Config.PluginDirectories) > 0 {
return serverInfo.Environments.Mcdr.Config.PluginDirectories[0]
}
if len(serverInfo.ModPath) == 1 {
return serverInfo.ModPath[0]
}
return serverInfo.WorkPath
}
func BuildRecursiveApplyPlan(tx *RecursiveTransaction) (ApplyPlan, error) {
if tx == nil {
return ApplyPlan{}, fmt.Errorf("install: nil recursive transaction")
}
candidateByName := make(map[types.ProjectName]CandidateNode, len(tx.CandidateGraph))
for _, node := range tx.CandidateGraph {
if node.Package.Remote != nil {
candidateByName[node.Package.Id.Name] = node
}
}
keys := make([]string, 0, len(tx.VerifiedGraph))
for key := range tx.VerifiedGraph {
keys = append(keys, key)
}
slices.Sort(keys)
install := make([]types.Package, 0, len(keys))
for _, key := range keys {
verified := tx.VerifiedGraph[key].Package
candidate, ok := tx.CandidateGraph[key]
if !ok || candidate.Package.Remote == nil {
candidate, ok = candidateByName[verified.Id.Name]
}
if !ok || candidate.Package.Remote == nil {
return ApplyPlan{}, fmt.Errorf(
"install: verified package %s is missing candidate remote metadata",
verified.Id.StringFull(),
)
}
pkg := verified
pkg.Remote = candidate.Package.Remote
install = append(install, pkg)
}
remove := make([]types.Package, 0)
for _, extraId := range tx.ReconcileDiff.Extra {
key := extraId.StringPlatformName()
node, ok := tx.CandidateGraph[key]
if !ok {
continue
}
if node.Package.Local == nil || node.Package.Local.Path == "" {
continue
}
remove = append(remove, node.Package)
}
return ApplyPlan{Install: install, Remove: remove}, nil
}
// ApplyValidatedClosure executes the finalized install/remove plan after the
// recursive transaction has been committed.
func ApplyValidatedClosure(tx *RecursiveTransaction, serverInfo types.ServerInfo) error {
if tx == nil {
return errors.New("install: recursive transaction is nil")
}
if tx.Phase != PhaseCommitted {
return fmt.Errorf("install: apply requires committed phase, got %d", tx.Phase)
}
if tx.Apply == nil {
return errors.New("install: apply requires a validated apply plan")
}
if serverInfo.WorkPath != "" && serverInfo.WorkPath != "." {
if err := os.MkdirAll(serverInfo.WorkPath, 0o755); err != nil {
return fmt.Errorf("create server work path failed: %w", err)
}
}
applied := 0
showRecursiveApplyStart(len(tx.Apply.Install))
if tx.StagingDir != "" && len(tx.Apply.Install) > 0 {
var moveErrors []error
for _, pkg := range tx.Apply.Install {
if pkg.Local == nil || pkg.Local.Path == "" {
continue
}
src := pkg.Local.Path
dstDir := recursiveInstallDestination(serverInfo, pkg)
if dstDir != "" && dstDir != "." {
if err := os.MkdirAll(dstDir, 0o755); err != nil {
moveErrors = append(moveErrors, fmt.Errorf("create install directory for %s: %w", pkg.Id.StringFull(), err))
continue
}
}
dst := filepath.Join(dstDir, filepath.Base(src))
if err := os.Rename(src, dst); err != nil {
moveErrors = append(moveErrors, fmt.Errorf("move %s: %w", pkg.Id.StringFull(), err))
continue
}
pkg.Local.Path = dst
applied++
}
if len(moveErrors) > 0 {
return errors.Join(moveErrors...)
}
}
var applyErrors []error
for _, pkg := range tx.Apply.Remove {
if pkg.Local == nil || pkg.Local.Path == "" {
continue
}
if err := os.Remove(pkg.Local.Path); err != nil {
applyErrors = append(
applyErrors,
fmt.Errorf("remove %s: %w", pkg.Id.StringFull(), err),
)
continue
}
applied++
}
showBatchSummary(applied, len(applyErrors))
if len(applyErrors) > 0 {
return errors.Join(applyErrors...)
}
probe.InvalidateServerInfo()
return nil
}
package install
import (
"fmt"
"github.com/mclucy/lucy/types"
)
func formatVersionConstraint(constraint types.VersionConstraint) string {
return constraint.Operator.ToSign() + fmt.Sprint(constraint.Value)
}
package install
import "github.com/mclucy/lucy/types"
// ConstraintGraph is the merged requirement graph keyed by
// PackageId.StringPlatformName().
type ConstraintGraph map[string]ConstraintRequirement
// ConstraintRequirement is the merged requirement set for one package identity.
// Constraint contains the merged DNF expression, and Provenance preserves the
// requester that contributed each atomic clause.
type ConstraintRequirement struct {
Id types.PackageId
Constraint types.VersionConstraintExpression
Provenance []ConstraintProvenance
variants []constraintVariant
}
// ConstraintProvenance records one atomic merged clause and the requester that
// introduced it.
type ConstraintProvenance struct {
Requester string
Constraint types.VersionConstraint
}
type constraintVariant struct {
Clauses []ConstraintProvenance
}
type boundConstraint struct {
Clause ConstraintProvenance
Inclusive bool
LowerBound bool
}
// MergeConstraintGraph is the pure constraint solver core for recursive
// installation. Callers must keep probing, routing, logging, and output outside
// this boundary and provide only in-memory constraint inputs.
//
// It merges all incoming constraint inputs by package identity, preserving
// requester provenance and returning a conflict error when the merged
// expression becomes unsatisfiable.
func MergeConstraintGraph(inputs []ConstraintInput) (ConstraintGraph, error) {
graph := make(ConstraintGraph)
for _, input := range inputs {
id := input.Dependency.Id
key := id.StringPlatformName()
entry := graph[key]
if entry.Id.Name == "" {
entry.Id = types.PackageId{Platform: id.Platform, Name: id.Name}
entry.variants = []constraintVariant{{}}
}
variants, err := constraintInputVariants(input)
if err != nil {
return nil, err
}
mergedVariants, mergeErr := mergeRequirementVariants(entry.Id, entry.variants, variants)
if mergeErr != nil {
return nil, mergeErr
}
entry.variants = mergedVariants
entry.Constraint = variantsToExpression(mergedVariants)
entry.Provenance = flattenProvenance(mergedVariants)
graph[key] = entry
}
return graph, nil
}
// IsSatisfied reports whether the merged requirement for id accepts version.
func (g ConstraintGraph) IsSatisfied(id types.PackageId, version types.ComparableVersion) bool {
entry, ok := g[id.StringPlatformName()]
if !ok {
return false
}
dep := types.Dependency{Id: entry.Id, Constraint: entry.Constraint, Mandatory: true}
return dep.Satisfy(id, version)
}
func mergeRequirementVariants(
id types.PackageId,
left []constraintVariant,
right []constraintVariant,
) ([]constraintVariant, error) {
if len(left) == 0 {
left = []constraintVariant{{}}
}
if len(right) == 0 {
right = []constraintVariant{{}}
}
merged := make([]constraintVariant, 0, len(left)*len(right))
var firstConflict *ConstraintConflictError
for _, leftVariant := range left {
for _, rightVariant := range right {
combined := constraintVariant{Clauses: make([]ConstraintProvenance, 0, len(leftVariant.Clauses)+len(rightVariant.Clauses))}
combined.Clauses = append(combined.Clauses, leftVariant.Clauses...)
combined.Clauses = append(combined.Clauses, rightVariant.Clauses...)
ok, conflict := conjunctionSatisfiable(id, combined.Clauses)
if ok {
merged = append(merged, combined)
continue
}
if firstConflict == nil {
firstConflict = conflict
}
}
}
if len(merged) == 0 {
if firstConflict != nil {
return nil, firstConflict
}
return nil, &ConstraintConflictError{PackageId: id}
}
return merged, nil
}
func conjunctionSatisfiable(id types.PackageId, clauses []ConstraintProvenance) (bool, *ConstraintConflictError) {
var eq *ConstraintProvenance
var lower *boundConstraint
var upper *boundConstraint
neqs := make([]ConstraintProvenance, 0)
for i := range clauses {
clause := clauses[i]
switch clause.Constraint.Operator {
case types.OpEq:
if eq == nil {
eq = &clause
continue
}
cmp, ok := eq.Constraint.Value.Compare(clause.Constraint.Value)
if ok && cmp != 0 {
return false, conflictFor(id, *eq, clause)
}
case types.OpNeq:
neqs = append(neqs, clause)
case types.OpGt:
lower = strongerLower(lower, boundConstraint{Clause: clause, LowerBound: true})
case types.OpGte:
lower = strongerLower(lower, boundConstraint{Clause: clause, Inclusive: true, LowerBound: true})
case types.OpLt:
upper = strongerUpper(upper, boundConstraint{Clause: clause})
case types.OpLte:
upper = strongerUpper(upper, boundConstraint{Clause: clause, Inclusive: true})
}
}
if eq != nil {
for _, neq := range neqs {
cmp, ok := eq.Constraint.Value.Compare(neq.Constraint.Value)
if ok && cmp == 0 {
return false, conflictFor(id, *eq, neq)
}
}
for _, clause := range clauses {
if clause.Constraint.Operator == types.OpEq {
continue
}
cmp := clause.Constraint.Operator.Comparator()
if !cmp(eq.Constraint.Value, clause.Constraint.Value) {
return false, conflictFor(id, *eq, clause)
}
}
return true, nil
}
if lower != nil && upper != nil {
cmp, ok := lower.Clause.Constraint.Value.Compare(upper.Clause.Constraint.Value)
if ok {
if cmp > 0 {
return false, conflictFor(id, lower.Clause, upper.Clause)
}
if cmp == 0 && (!lower.Inclusive || !upper.Inclusive) {
return false, conflictFor(id, lower.Clause, upper.Clause)
}
}
}
return true, nil
}
func strongerLower(current *boundConstraint, candidate boundConstraint) *boundConstraint {
if current == nil {
return &candidate
}
cmp, ok := current.Clause.Constraint.Value.Compare(candidate.Clause.Constraint.Value)
if !ok {
return current
}
if cmp < 0 {
return &candidate
}
if cmp > 0 {
return current
}
if current.Inclusive && !candidate.Inclusive {
return &candidate
}
return current
}
func strongerUpper(current *boundConstraint, candidate boundConstraint) *boundConstraint {
if current == nil {
return &candidate
}
cmp, ok := current.Clause.Constraint.Value.Compare(candidate.Clause.Constraint.Value)
if !ok {
return current
}
if cmp > 0 {
return &candidate
}
if cmp < 0 {
return current
}
if current.Inclusive && !candidate.Inclusive {
return &candidate
}
return current
}
func conflictFor(id types.PackageId, left, right ConstraintProvenance) *ConstraintConflictError {
return &ConstraintConflictError{
PackageId: id,
Left: ConstraintConflictSource{Requester: left.Requester, Constraint: left.Constraint},
Right: ConstraintConflictSource{Requester: right.Requester, Constraint: right.Constraint},
}
}
package install
import (
"fmt"
"github.com/mclucy/lucy/dependency"
"github.com/mclucy/lucy/types"
)
func constraintInputVariants(input ConstraintInput) ([]constraintVariant, error) {
expr, err := normalizeConstraintExpression(input.Dependency)
if err != nil {
return nil, err
}
variants := make([]constraintVariant, 0, len(expr))
for _, group := range expr {
variant := constraintVariant{Clauses: make([]ConstraintProvenance, 0, len(group))}
for _, clause := range group {
expanded, expandErr := expandConstraint(clause)
if expandErr != nil {
return nil, expandErr
}
for _, expandedClause := range expanded {
variant.Clauses = append(variant.Clauses, ConstraintProvenance{
Requester: input.Requester,
Constraint: expandedClause,
})
}
}
variants = append(variants, variant)
}
if len(variants) == 0 {
return []constraintVariant{{}}, nil
}
return variants, nil
}
func normalizeConstraintExpression(dep types.Dependency) (types.VersionConstraintExpression, error) {
if len(dep.Constraint) > 0 {
return cloneConstraintExpression(dep.Constraint), nil
}
if dep.Id.Version == "" || dep.Id.Version == types.VersionAny || dep.Id.Version.IsInvalid() || dep.Id.Version.CanInfer() {
return types.VersionConstraintExpression{{}}, nil
}
value, err := dependency.Parse(dep.Id.Version, defaultVersionScheme(dep.Id))
if err != nil {
return nil, fmt.Errorf("install: failed to parse fixed constraint version %q for %s: %w", dep.Id.Version, dep.Id.StringPlatformName(), err)
}
if value == nil {
return nil, fmt.Errorf("install: failed to parse fixed constraint version %q for %s", dep.Id.Version, dep.Id.StringPlatformName())
}
return types.VersionConstraintExpression{{{
Operator: types.OpEq,
Value: value,
}}}, nil
}
func defaultVersionScheme(id types.PackageId) types.VersionScheme {
if id.Platform == types.PlatformMinecraft {
return types.MinecraftRelease
}
return types.Semver
}
func expandConstraint(clause types.VersionConstraint) ([]types.VersionConstraint, error) {
switch clause.Operator {
case types.OpWeakEq:
lower, upper, ok := semverWindow(clause.Value, true)
if !ok {
return []types.VersionConstraint{clause}, nil
}
return []types.VersionConstraint{{Operator: types.OpGte, Value: lower}, {Operator: types.OpLt, Value: upper}}, nil
case types.OpWeakGt:
_, upper, ok := semverWindow(clause.Value, false)
if !ok {
return []types.VersionConstraint{{Operator: types.OpGt, Value: clause.Value}}, nil
}
return []types.VersionConstraint{{Operator: types.OpGt, Value: clause.Value}, {Operator: types.OpLt, Value: upper}}, nil
default:
return []types.VersionConstraint{clause}, nil
}
}
type semverTuple interface {
Major() uint64
Minor() uint64
Patch() uint64
}
func semverWindow(value types.ComparableVersion, tilde bool) (types.ComparableVersion, types.ComparableVersion, bool) {
if value == nil || value.Scheme() != types.Semver {
return nil, nil, false
}
sv, ok := value.(semverTuple)
if !ok {
return nil, nil, false
}
if tilde {
if sv.Minor() == 0 && sv.Patch() == 0 {
return value, dependency.NewSemver(sv.Major()+1, 0, 0), true
}
return value, dependency.NewSemver(sv.Major(), sv.Minor()+1, 0), true
}
return value, dependency.NewSemver(sv.Major()+1, 0, 0), true
}
func variantsToExpression(variants []constraintVariant) types.VersionConstraintExpression {
expr := make(types.VersionConstraintExpression, 0, len(variants))
for _, variant := range variants {
group := make([]types.VersionConstraint, 0, len(variant.Clauses))
for _, clause := range variant.Clauses {
group = append(group, clause.Constraint)
}
expr = append(expr, group)
}
if len(expr) == 0 {
return types.VersionConstraintExpression{{}}
}
return expr
}
func flattenProvenance(variants []constraintVariant) []ConstraintProvenance {
provenance := make([]ConstraintProvenance, 0)
for _, variant := range variants {
provenance = append(provenance, variant.Clauses...)
}
return provenance
}
func cloneConstraintExpression(expr types.VersionConstraintExpression) types.VersionConstraintExpression {
cloned := make(types.VersionConstraintExpression, len(expr))
for i, group := range expr {
cloned[i] = append([]types.VersionConstraint(nil), group...)
}
return cloned
}
package install
import (
"context"
"fmt"
"os"
"slices"
"strings"
"sync"
"time"
"github.com/mclucy/lucy/cache"
tuiprogress "github.com/mclucy/lucy/tui/progress"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
func recursiveCandidatePackages(tx *RecursiveTransaction) []types.Package {
if tx == nil {
return nil
}
keys := make([]string, 0, len(tx.CandidateGraph))
for key, node := range tx.CandidateGraph {
if node.Package.Remote == nil {
continue
}
keys = append(keys, key)
}
slices.Sort(keys)
packages := make([]types.Package, 0, len(keys))
for _, key := range keys {
packages = append(packages, tx.CandidateGraph[key].Package)
}
return packages
}
func pruneRecursiveCandidates(tx *RecursiveTransaction, excluded map[string]struct{}) {
if tx == nil || len(excluded) == 0 {
return
}
for key := range excluded {
delete(tx.CandidateGraph, key)
}
}
func backfillRecursiveDownloads(tx *RecursiveTransaction, packages []types.Package) {
if tx == nil {
return
}
for _, pkg := range packages {
if pkg.Local == nil {
continue
}
tx.DownloadedArtifacts[pkg.Id.StringFull()] = pkg.Local.Path
key := pkg.Id.StringPlatformName()
node, ok := tx.CandidateGraph[key]
if !ok {
continue
}
node.Package.Local = pkg.Local
tx.CandidateGraph[key] = node
}
}
func downloadBatchPackages(
workPath string,
packages []types.Package,
) (stagingDir string, downloaded []types.Package, err error) {
stagingDir, err = os.MkdirTemp("", "lucy_*")
if err != nil {
return "", nil, fmt.Errorf("create staging directory failed: %w", err)
}
if workPath != "." {
if err := os.MkdirAll(workPath, 0o755); err != nil {
return stagingDir, nil, fmt.Errorf("create server work path failed: %w", err)
}
}
resolvedIds := make([]types.PackageId, len(packages))
for i, p := range packages {
resolvedIds[i] = p.Id
}
showBatchPhase("Downloading", resolvedIds)
type slot struct {
pkg types.Package
err error
ok bool
failed bool
}
slots := make([]slot, len(packages))
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for i, p := range packages {
tracker := tuiprogress.NewTracker(p.Id.StringFull())
wg.Add(1)
go func(index int, pkg types.Package, tracker *tuiprogress.Tracker) {
defer wg.Done()
defer tracker.Close()
if ctx.Err() != nil {
slots[index] = slot{failed: true, err: ctx.Err()}
return
}
result, err := util.CachedDownload(
pkg.Remote.FileUrl,
stagingDir,
util.DownloadOptions{
Kind: cache.KindArtifact,
Filename: pkg.Remote.Filename,
ExpectedHash: pkg.Remote.Hash,
HashAlgorithm: cache.ParseHashAlgorithm(pkg.Remote.HashAlgorithm),
WrapReader: tracker.ProxyReader,
OnResolvedFilename: func(name string) {
tracker.SetTitle(name)
},
OnCacheHit: tracker.CacheHit,
},
)
if err != nil {
cancel()
slots[index] = slot{failed: true, err: err}
return
}
if result.File != nil {
pkg.Local = &types.PackageInstallation{Path: result.File.Name()}
if err := result.File.Close(); err != nil {
cancel()
slots[index] = slot{failed: true, err: err}
return
}
}
slots[index] = slot{ok: true, pkg: pkg}
}(i, p, tracker)
}
wg.Wait()
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second)
defer shutdownCancel()
_ = tuiprogress.WaitForShutdown(shutdownCtx)
downloaded = make([]types.Package, 0, len(packages))
failures := make([]string, 0)
for i, item := range slots {
if item.ok {
downloaded = append(downloaded, item.pkg)
}
if item.failed {
failures = append(failures, fmt.Sprintf("%s: %v", packages[i].Id.StringFull(), item.err))
}
}
if len(failures) > 0 {
return stagingDir, nil, fmt.Errorf(
"failed to download packages: %s",
strings.Join(failures, "; "),
)
}
return stagingDir, downloaded, nil
}
package install
import (
"fmt"
"github.com/mclucy/lucy/types"
)
// ConstraintConflictSource identifies one requester-side clause participating in
// an irreconcilable merged constraint.
type ConstraintConflictSource struct {
Requester string
Constraint types.VersionConstraint
}
// ConstraintConflictError reports that merged requirements for one package
// identity have no satisfiable intersection.
type ConstraintConflictError struct {
PackageId types.PackageId
Left ConstraintConflictSource
Right ConstraintConflictSource
}
func (e *ConstraintConflictError) Error() string {
if e == nil {
return "install: constraint conflict"
}
return fmt.Sprintf(
"install: constraint conflict for %s between %q (%s) and %q (%s)",
e.PackageId.StringPlatformName(),
e.Left.Requester,
formatVersionConstraint(e.Left.Constraint),
e.Right.Requester,
formatVersionConstraint(e.Right.Constraint),
)
}
package install
import (
"fmt"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/types"
)
// SnapshotInstalledConstraints reads installed packages from the probe snapshot
// and converts them into fixed InstalledConstraint entries for the transaction.
// Each installed package is treated as an immutable anchor during recursive
// solving; it will never be auto-replaced by the solver.
func SnapshotInstalledConstraints(tx *RecursiveTransaction) {
si := probe.ServerInfo()
constraints := make([]InstalledConstraint, 0, len(si.Packages)+3)
seen := make(map[string]struct{}, len(si.Packages)+3)
appendConstraint := func(pkg types.Package, requester string) {
if pkg.Id.Version.IsInvalid() {
return
}
key := pkg.Id.StringPlatformName()
if _, ok := seen[key]; ok {
return
}
seen[key] = struct{}{}
constraints = append(constraints, InstalledConstraint{
Package: pkg,
ConstraintInput: ConstraintInput{
Requester: requester,
Dependency: types.Dependency{
Id: pkg.Id,
Mandatory: true,
},
},
})
}
for _, pkg := range si.Packages {
appendConstraint(pkg, fmt.Sprintf("installed:%s", pkg.Id.StringFull()))
}
if si.Runtime != nil {
loader := si.Runtime.DerivedModLoader()
if loader.Valid() && loader != types.PlatformNone && loader != types.PlatformUnknown {
if !si.Runtime.GameVersion.IsInvalid() && si.Runtime.GameVersion != types.VersionAny {
appendConstraint(types.Package{
Id: types.PackageId{
Platform: loader,
Name: types.ProjectName("minecraft"),
Version: si.Runtime.GameVersion,
},
}, fmt.Sprintf("runtime:%s/minecraft@%s", loader, si.Runtime.GameVersion))
}
appendConstraint(types.Package{
Id: types.PackageId{
Platform: loader,
Name: types.ProjectName("java"),
Version: types.VersionAny,
},
}, fmt.Sprintf("runtime:%s/java", loader))
if primary := si.Runtime.PrimaryRuntimeIdentity(); primary != nil {
if alias := runtimeLoaderAliasName(primary.IdentityToPlatform()); alias != "" {
appendConstraint(types.Package{
Id: types.PackageId{
Platform: loader,
Name: alias,
Version: primary.Version,
},
}, fmt.Sprintf("runtime:%s/%s@%s", loader, alias, primary.Version))
}
}
}
}
tx.InstalledConstraints = constraints
}
func runtimeLoaderAliasName(platform types.Platform) types.ProjectName {
switch platform {
case types.PlatformFabric:
return types.ProjectName("fabricloader")
case types.PlatformForge:
return types.ProjectName("forge")
case types.PlatformNeoforge:
return types.ProjectName("neoforge")
default:
return ""
}
}
// FindCompatibleInstalled searches the installed-constraint snapshot for any
// package with the same platform and name as the requested ID, returning all
// matches. Results are informational only; the solver must not auto-select them.
func FindCompatibleInstalled(tx *RecursiveTransaction, id types.PackageId) []types.Package {
var matches []types.Package
for _, ic := range tx.InstalledConstraints {
pkg := ic.Package
if pkg.Id.Platform != id.Platform {
continue
}
if pkg.Id.Name != id.Name {
continue
}
matches = append(matches, pkg)
}
return matches
}
// ReportCompatibleInstalled logs any locally-installed versions that are
// compatible with the given package ID. This is an informational-only report;
// no automatic selection occurs.
func ReportCompatibleInstalled(tx *RecursiveTransaction, id types.PackageId) {
matches := FindCompatibleInstalled(tx, id)
for _, pkg := range matches {
logger.ShowInfo(fmt.Sprintf(
"[recursive] compatible installed version found: %s (not auto-selected)",
pkg.Id.StringFull(),
))
}
}
package install
import (
"fmt"
"slices"
"strings"
"github.com/mclucy/lucy/types"
)
// ReconcileTransaction compares advisory candidate facts with authoritative
// verified facts, computes a stable diff, and validates tightened local
// constraints through the merge engine.
func ReconcileTransaction(tx *RecursiveTransaction) (ReconcileDiff, error) {
if tx == nil {
return ReconcileDiff{}, fmt.Errorf("install: nil recursive transaction")
}
if tx.Phase != PhaseVerified {
return ReconcileDiff{}, fmt.Errorf("install: reconcile requires PhaseVerified transaction")
}
showRecursiveReconcileStart()
diff, err := reconcileDiffKernel(
tx.Roots,
tx.InstalledConstraints,
tx.CandidateGraph,
tx.VerifiedGraph,
)
if err != nil {
return ReconcileDiff{}, err
}
tx.ReconcileDiff = diff
showRecursiveReconcileDiff(diff)
return diff, nil
}
func reconcileDiffKernel(
roots []types.PackageId,
installed []InstalledConstraint,
candidateGraph map[string]CandidateNode,
verifiedGraph map[string]CandidateNode,
) (ReconcileDiff, error) {
baseInputs, err := reconcileConstraintInputs(roots, installed, candidateGraph, verifiedGraph)
if err != nil {
return ReconcileDiff{}, err
}
diff, err := reconcileDiff(candidateGraph, verifiedGraph)
if err != nil {
return ReconcileDiff{}, err
}
if diff.IsStable() {
return diff, nil
}
if err := reconcileValidateTightenedDiff(baseInputs, diff); err != nil {
return ReconcileDiff{}, err
}
return diff, nil
}
func reconcileDiff(candidateGraph map[string]CandidateNode, verifiedGraph map[string]CandidateNode) (ReconcileDiff, error) {
missing := make(map[string]types.PackageId)
tightened := make(map[string]ConstraintInput)
for key, verifiedNode := range verifiedGraph {
verifiedDeps, err := reconcileDependencyMap(verifiedNode.Package.Id.StringFull(), verifiedNode.Package.Dependencies)
if err != nil {
return ReconcileDiff{}, err
}
advisoryDeps := map[string]types.Dependency{}
if advisoryNode, ok := candidateGraph[key]; ok {
advisoryDeps, err = reconcileDependencyMap(advisoryNode.Package.Id.StringFull(), advisoryNode.Package.Dependencies)
if err != nil {
return ReconcileDiff{}, err
}
}
for depKey, verifiedDep := range verifiedDeps {
// Embedded deps are physically bundled inside the parent JAR
// (e.g. NeoForge JarInJar). They are already present on disk and
// do not need to be resolved from upstream package registries.
if verifiedDep.Mandatory && !verifiedDep.Embedded {
if _, exists := candidateGraph[depKey]; !exists {
missing[depKey] = verifiedDep.Id
}
}
advisoryDep, ok := advisoryDeps[depKey]
if !ok {
continue
}
if !reconcileConstraintTightened(advisoryDep, verifiedDep) {
continue
}
tightened[reconcileTightenedKey(verifiedNode.Package.Id.StringFull(), depKey)] = ConstraintInput{
Requester: verifiedNode.Package.Id.StringFull(),
Dependency: verifiedDep,
}
}
}
reachable, err := reconcileReachableCandidateClosure(candidateGraph, verifiedGraph)
if err != nil {
return ReconcileDiff{}, err
}
// Build a name-only index of verified nodes to handle platform normalisation:
// upstream APIs may return platform=none/any/unknown for a package that the
// local detector identifies as forge/fabric/etc. A candidate keyed as
// "none/create" is the same artifact as a verified node keyed "forge/create".
verifiedByName := make(map[types.ProjectName]struct{}, len(verifiedGraph))
for _, vn := range verifiedGraph {
verifiedByName[vn.Package.Id.Name] = struct{}{}
}
extra := make(map[string]types.PackageId)
for key, candidateNode := range candidateGraph {
if !candidateNode.Advisory {
continue
}
if _, ok := reachable[key]; ok {
continue
}
// Treat a platform-wildcard candidate as reachable if a verified node
// with the same name exists — they represent the same artifact.
p := candidateNode.Package.Id.Platform
if p == types.PlatformNone || p == types.PlatformAny || p.CanInfer() {
if _, ok := verifiedByName[candidateNode.Package.Id.Name]; ok {
continue
}
}
extra[key] = candidateNode.Package.Id
}
return ReconcileDiff{
Missing: reconcileSortedPackageIDs(missing),
Extra: reconcileSortedPackageIDs(extra),
Tightened: reconcileSortedConstraintInputs(tightened),
}, nil
}
func reconcileValidateTightenedDiff(baseInputs []ConstraintInput, diff ReconcileDiff) error {
if len(diff.Tightened) == 0 {
return nil
}
inputs := append([]ConstraintInput(nil), baseInputs...)
inputs = append(inputs, diff.Tightened...)
if _, err := MergeConstraintGraph(inputs); err != nil {
return fmt.Errorf("install: reconcile made no progress, aborting")
}
return nil
}
func reconcileConstraintInputs(
roots []types.PackageId,
installed []InstalledConstraint,
candidateGraph map[string]CandidateNode,
verifiedGraph map[string]CandidateNode,
) ([]ConstraintInput, error) {
inputs := make([]ConstraintInput, 0)
for _, root := range roots {
inputs = append(inputs, ConstraintInput{
Requester: "root",
Dependency: types.Dependency{
Id: root,
Mandatory: true,
},
})
}
for _, installed := range installed {
inputs = append(inputs, installed.ConstraintInput)
}
keys := make(map[string]struct{}, len(candidateGraph)+len(verifiedGraph))
for key := range candidateGraph {
keys[key] = struct{}{}
}
for key := range verifiedGraph {
keys[key] = struct{}{}
}
orderedKeys := make([]string, 0, len(keys))
for key := range keys {
orderedKeys = append(orderedKeys, key)
}
slices.Sort(orderedKeys)
for _, key := range orderedKeys {
node, ok := verifiedGraph[key]
if !ok {
node, ok = candidateGraph[key]
}
if !ok {
continue
}
deps, err := reconcileDependencyMap(node.Package.Id.StringFull(), node.Package.Dependencies)
if err != nil {
return nil, err
}
depKeys := make([]string, 0, len(deps))
for depKey := range deps {
depKeys = append(depKeys, depKey)
}
slices.Sort(depKeys)
for _, depKey := range depKeys {
inputs = append(inputs, ConstraintInput{
Requester: node.Package.Id.StringFull(),
Dependency: deps[depKey],
})
}
}
return inputs, nil
}
func reconcileDependencyMap(requester string, deps *types.PackageDependencies) (map[string]types.Dependency, error) {
if deps == nil || len(deps.Value) == 0 {
return map[string]types.Dependency{}, nil
}
mandatory := make(map[string]bool)
embedded := make(map[string]bool)
inputs := make([]ConstraintInput, 0, len(deps.Value))
for _, dep := range deps.Value {
key := dep.Id.StringPlatformName()
mandatory[key] = mandatory[key] || dep.Mandatory
embedded[key] = embedded[key] || dep.Embedded
inputs = append(inputs, ConstraintInput{Requester: requester, Dependency: dep})
}
graph, err := MergeConstraintGraph(inputs)
if err != nil {
return nil, err
}
merged := make(map[string]types.Dependency, len(graph))
for key, requirement := range graph {
merged[key] = types.Dependency{
Id: types.PackageId{
Platform: requirement.Id.Platform,
Name: requirement.Id.Name,
},
Constraint: requirement.Constraint,
Mandatory: mandatory[key],
Embedded: embedded[key],
}
}
return merged, nil
}
func reconcileReachableCandidateClosure(
candidateGraph map[string]CandidateNode,
verifiedGraph map[string]CandidateNode,
) (map[string]struct{}, error) {
reachable := make(map[string]struct{}, len(verifiedGraph))
queue := make([]string, 0, len(verifiedGraph))
for key := range verifiedGraph {
reachable[key] = struct{}{}
queue = append(queue, key)
}
for len(queue) > 0 {
key := queue[0]
queue = queue[1:]
node, ok := verifiedGraph[key]
if !ok {
node, ok = candidateGraph[key]
}
if !ok {
continue
}
deps, err := reconcileDependencyMap(node.Package.Id.StringFull(), node.Package.Dependencies)
if err != nil {
return nil, err
}
depKeys := make([]string, 0, len(deps))
for depKey := range deps {
depKeys = append(depKeys, depKey)
}
slices.Sort(depKeys)
for _, depKey := range depKeys {
if _, exists := candidateGraph[depKey]; !exists {
continue
}
if _, seen := reachable[depKey]; seen {
continue
}
reachable[depKey] = struct{}{}
queue = append(queue, depKey)
}
}
return reachable, nil
}
func reconcileConstraintTightened(advisory, verified types.Dependency) bool {
if verified.Mandatory && !advisory.Mandatory {
return true
}
if advisory.Mandatory != verified.Mandatory && !verified.Mandatory {
return false
}
if reconcileConstraintExpressionKey(advisory.Constraint) == reconcileConstraintExpressionKey(verified.Constraint) {
return false
}
merged, err := MergeConstraintGraph([]ConstraintInput{
{Requester: "advisory", Dependency: advisory},
{Requester: "verified", Dependency: verified},
})
if err != nil {
return true
}
entry, ok := merged[verified.Id.StringPlatformName()]
if !ok {
return false
}
return reconcileConstraintExpressionKey(entry.Constraint) == reconcileConstraintExpressionKey(verified.Constraint)
}
func reconcileSortedPackageIDs(items map[string]types.PackageId) []types.PackageId {
result := make([]types.PackageId, 0, len(items))
for _, id := range items {
result = append(result, id)
}
slices.SortFunc(result, func(a, b types.PackageId) int {
if a.Platform != b.Platform {
return strings.Compare(a.Platform.String(), b.Platform.String())
}
if a.Name != b.Name {
return strings.Compare(a.Name.String(), b.Name.String())
}
return strings.Compare(a.Version.String(), b.Version.String())
})
return result
}
func reconcileSortedConstraintInputs(items map[string]ConstraintInput) []ConstraintInput {
result := make([]ConstraintInput, 0, len(items))
for _, input := range items {
result = append(result, input)
}
slices.SortFunc(result, func(a, b ConstraintInput) int {
if a.Requester != b.Requester {
return strings.Compare(a.Requester, b.Requester)
}
if a.Dependency.Id.Platform != b.Dependency.Id.Platform {
return strings.Compare(a.Dependency.Id.Platform.String(), b.Dependency.Id.Platform.String())
}
if a.Dependency.Id.Name != b.Dependency.Id.Name {
return strings.Compare(a.Dependency.Id.Name.String(), b.Dependency.Id.Name.String())
}
return strings.Compare(
reconcileConstraintExpressionKey(a.Dependency.Constraint),
reconcileConstraintExpressionKey(b.Dependency.Constraint),
)
})
return result
}
func reconcileTightenedKey(requester, depKey string) string {
return requester + "->" + depKey
}
func reconcileConstraintExpressionKey(expr types.VersionConstraintExpression) string {
if len(expr) == 0 {
return "any"
}
groups := make([]string, 0, len(expr))
for _, group := range expr {
if len(group) == 0 {
groups = append(groups, "any")
continue
}
clauses := make([]string, 0, len(group))
for _, clause := range group {
clauses = append(clauses, formatVersionConstraint(clause))
}
slices.Sort(clauses)
groups = append(groups, strings.Join(clauses, "&"))
}
slices.Sort(groups)
return strings.Join(groups, "|")
}
package install
import (
"fmt"
"strings"
"github.com/mclucy/lucy/types"
)
// recursiveResolutionPlan is the pure value contract for one recursive
// resolution pass: which roots to solve, which fixed constraints to respect,
// and which advisory candidates must be pruned before verification.
type recursiveResolutionPlan struct {
Roots []types.PackageId
InstalledConstraints []InstalledConstraint
ExcludedCandidates map[string]struct{}
}
func newRecursiveResolutionPlan(
roots []types.PackageId,
installedConstraints []InstalledConstraint,
) recursiveResolutionPlan {
return recursiveResolutionPlan{
Roots: append([]types.PackageId(nil), roots...),
InstalledConstraints: append([]InstalledConstraint(nil), installedConstraints...),
ExcludedCandidates: map[string]struct{}{},
}
}
func refineRecursiveResolutionPlan(
plan recursiveResolutionPlan,
diff ReconcileDiff,
) recursiveResolutionPlan {
return recursiveResolutionPlan{
Roots: appendMissingRoots(plan.Roots, diff.Missing),
InstalledConstraints: mergeReconcileConstraints(
plan.InstalledConstraints,
tightenedConstraintInputs(diff.Tightened),
),
ExcludedCandidates: excludedCandidateKeys(diff.Extra),
}
}
func summarizeReconcileDiff(diff ReconcileDiff) string {
parts := make([]string, 0, 3)
if len(diff.Missing) > 0 {
parts = append(parts, fmt.Sprintf("missing=%d", len(diff.Missing)))
}
if len(diff.Extra) > 0 {
parts = append(parts, fmt.Sprintf("extra=%d", len(diff.Extra)))
}
if len(diff.Tightened) > 0 {
parts = append(parts, fmt.Sprintf("tightened=%d", len(diff.Tightened)))
}
if len(parts) == 0 {
return "no changes"
}
return strings.Join(parts, ", ")
}
func excludedCandidateKeys(ids []types.PackageId) map[string]struct{} {
excluded := make(map[string]struct{}, len(ids))
for _, id := range ids {
excluded[id.StringPlatformName()] = struct{}{}
}
return excluded
}
func appendMissingRoots(existing []types.PackageId, missing []types.PackageId) []types.PackageId {
if len(missing) == 0 {
return append([]types.PackageId(nil), existing...)
}
seen := make(map[string]struct{}, len(existing)+len(missing))
updated := make([]types.PackageId, 0, len(existing)+len(missing))
for _, id := range existing {
key := id.StringPlatformName()
if _, ok := seen[key]; ok {
continue
}
seen[key] = struct{}{}
updated = append(updated, id)
}
for _, id := range missing {
key := id.StringPlatformName()
if _, ok := seen[key]; ok {
continue
}
seen[key] = struct{}{}
updated = append(updated, id)
}
return updated
}
func mergeReconcileConstraints(groups ...[]InstalledConstraint) []InstalledConstraint {
merged := make([]InstalledConstraint, 0)
index := make(map[string]int)
for _, group := range groups {
for _, constraint := range group {
key := reconcileConstraintInputKey(constraint.ConstraintInput)
if pos, ok := index[key]; ok {
merged[pos] = constraint
continue
}
index[key] = len(merged)
merged = append(merged, constraint)
}
}
return merged
}
func tightenedConstraintInputs(inputs []ConstraintInput) []InstalledConstraint {
constraints := make([]InstalledConstraint, 0, len(inputs))
for _, input := range inputs {
constraints = append(constraints, InstalledConstraint{ConstraintInput: input})
}
return constraints
}
func reconcileConstraintInputKey(input ConstraintInput) string {
return input.Requester + "|" + input.Dependency.Id.StringPlatformName()
}
package install
import (
"fmt"
"strings"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
"github.com/mclucy/lucy/upstream/routing"
)
type candidateRequest struct {
id types.PackageId
provenancePath []string
mandatory bool
}
type candidateGraphResolver interface {
ResolvePackage(id types.PackageId) (types.Package, error)
ResolveDependencies(pkg types.Package) ([]types.PackageDependencies, error)
}
type candidateGraphPlanner struct {
tx *RecursiveTransaction
constraintInputs []ConstraintInput
queue []candidateRequest
}
// BuildCandidateGraph expands the recursive advisory dependency closure for the
// requested roots, seeding fixed installed constraints up front and running the
// constraint merge engine after every newly discovered dependency batch.
func BuildCandidateGraph(
roots []types.PackageId,
providers []upstream.Provider,
installedConstraints []InstalledConstraint,
options Options,
) (*RecursiveTransaction, error) {
return BuildCandidateGraphWithResolver(
roots,
providers,
installedConstraints,
options,
providerCandidateResolver{providers: providers},
)
}
// BuildCandidateGraphWithResolver drives candidate-graph expansion using a
// caller-provided resolver so the planning loop can run without direct provider
// or routing calls in the planner core.
func BuildCandidateGraphWithResolver(
roots []types.PackageId,
providers []upstream.Provider,
installedConstraints []InstalledConstraint,
options Options,
resolver candidateGraphResolver,
) (*RecursiveTransaction, error) {
planner, err := newCandidateGraphPlanner(roots, providers, installedConstraints)
if err != nil {
return nil, err
}
for {
current, ok := planner.next()
if !ok {
return planner.transaction(), nil
}
pkg, err := resolver.ResolvePackage(current.id)
if err != nil {
if current.mandatory {
return nil, err
}
continue
}
dependencySets, err := resolver.ResolveDependencies(pkg)
if err != nil {
if current.mandatory {
return nil, err
}
continue
}
if err := planner.admit(current, pkg, dependencySets, options); err != nil {
return nil, err
}
}
}
func newCandidateGraphPlanner(
roots []types.PackageId,
providers []upstream.Provider,
installedConstraints []InstalledConstraint,
) (*candidateGraphPlanner, error) {
tx := NewRecursiveTransaction(roots, providers)
tx.InstalledConstraints = append([]InstalledConstraint(nil), installedConstraints...)
constraintInputs := make([]ConstraintInput, 0, len(installedConstraints))
for _, installed := range installedConstraints {
constraintInputs = append(constraintInputs, installed.ConstraintInput)
if installed.Package.Id.Platform == "" || installed.Package.Id.Name == "" {
continue
}
key := installed.Package.Id.StringPlatformName()
if _, exists := tx.CandidateGraph[key]; exists {
continue
}
tx.CandidateGraph[key] = CandidateNode{
Package: installed.Package,
ProvenancePath: []string{installed.ConstraintInput.Requester},
Advisory: false,
}
}
if _, err := MergeConstraintGraph(constraintInputs); err != nil {
return nil, err
}
queue := make([]candidateRequest, 0, len(roots))
for _, root := range roots {
ReportCompatibleInstalled(tx, root)
queue = append(queue, candidateRequest{
id: root,
provenancePath: []string{"root"},
mandatory: true,
})
}
return &candidateGraphPlanner{
tx: tx,
constraintInputs: constraintInputs,
queue: queue,
}, nil
}
func (planner *candidateGraphPlanner) next() (candidateRequest, bool) {
for len(planner.queue) > 0 {
current := planner.queue[0]
planner.queue = planner.queue[1:]
key := current.id.StringPlatformName()
if _, exists := planner.tx.CandidateGraph[key]; exists {
continue
}
return current, true
}
return candidateRequest{}, false
}
func (planner *candidateGraphPlanner) admit(
current candidateRequest,
pkg types.Package,
dependencySets []types.PackageDependencies,
options Options,
) error {
key := current.id.StringPlatformName()
planner.tx.CandidateGraph[key] = CandidateNode{
Package: pkg,
ProvenancePath: append([]string(nil), current.provenancePath...),
Advisory: true,
}
batchInputs := make([]ConstraintInput, 0)
children := make([]candidateRequest, 0)
for _, dependencySet := range dependencySets {
requester := current.id.StringFull()
for _, dependency := range dependencySet.Value {
if !dependency.Mandatory && !options.WithOptional {
continue
}
batchInputs = append(batchInputs, ConstraintInput{
Requester: requester,
Dependency: dependency,
})
childKey := dependency.Id.StringPlatformName()
if _, exists := planner.tx.CandidateGraph[childKey]; exists {
continue
}
children = append(children, candidateRequest{
id: dependency.Id,
provenancePath: appendPath(current.provenancePath, requester),
mandatory: dependency.Mandatory,
})
}
}
if len(batchInputs) > 0 {
planner.constraintInputs = append(planner.constraintInputs, batchInputs...)
if _, err := MergeConstraintGraph(planner.constraintInputs); err != nil {
return err
}
}
planner.queue = append(planner.queue, children...)
return nil
}
func (planner *candidateGraphPlanner) transaction() *RecursiveTransaction {
if planner == nil {
return nil
}
return planner.tx
}
func appendPath(path []string, requester string) []string {
next := make([]string, 0, len(path)+1)
next = append(next, path...)
next = append(next, requester)
return next
}
func formatProviderErrors(providerErrors []routing.ProviderError) string {
if len(providerErrors) == 0 {
return "no provider succeeded"
}
reasons := make([]string, 0, len(providerErrors))
for _, providerErr := range providerErrors {
reasons = append(reasons, fmt.Sprintf(" - %s", providerErr.Error()))
}
return "provider failures:\n" + strings.Join(reasons, "\n")
}
package install
import (
"fmt"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
"github.com/mclucy/lucy/upstream/routing"
)
type providerCandidateResolver struct {
providers []upstream.Provider
}
func (resolver providerCandidateResolver) ResolvePackage(
id types.PackageId,
) (types.Package, error) {
attempts := []types.PackageId{id}
if id.Version == types.VersionCompatible {
attempts = append(attempts,
types.PackageId{Platform: id.Platform, Name: id.Name, Version: types.VersionLatest},
types.PackageId{Platform: id.Platform, Name: id.Name, Version: types.VersionAny},
)
}
var lastErrors []routing.ProviderError
for _, attempt := range attempts {
fetches, providerErrors := routing.FetchMany(resolver.providers, attempt)
if len(fetches) == 0 {
lastErrors = providerErrors
continue
}
fetch := fetches[0]
return types.Package{
Id: fetch.ResolvedID,
Remote: &fetch.Remote,
}, nil
}
return types.Package{}, fmt.Errorf(
"install: failed to resolve mandatory dependency %s: %s",
id.StringPlatformName(),
formatProviderErrors(lastErrors),
)
}
func (resolver providerCandidateResolver) ResolveDependencies(
pkg types.Package,
) ([]types.PackageDependencies, error) {
providers := providersForSource(resolver.providers, pkg.Remote)
dependencySets, providerErrors := routing.DependenciesMany(providers, pkg.Id)
if len(dependencySets) > 0 {
return dependencySets, nil
}
return nil, fmt.Errorf(
"install: failed to resolve mandatory dependency %s: %s",
pkg.Id.StringPlatformName(),
formatProviderErrors(providerErrors),
)
}
func providersForSource(
providers []upstream.Provider,
remote *types.PackageRemote,
) []upstream.Provider {
if remote == nil {
return providers
}
filtered := make([]upstream.Provider, 0, 1)
for _, provider := range providers {
if provider.Source() == remote.Source {
filtered = append(filtered, provider)
}
}
if len(filtered) == 0 {
return providers
}
return filtered
}
package install
import (
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
// RecursivePhase describes the current lifecycle phase of a RecursiveTransaction.
// Phases advance monotonically; no phase may be skipped, and committed state
// is never reachable before verified state has been established.
type RecursivePhase uint8
const (
// PhaseCandidate is the initial phase. The transaction holds root requests
// and advisory upstream dependency metadata. No local facts are present yet.
PhaseCandidate RecursivePhase = iota
// PhaseDownloaded means all candidate artifacts have been downloaded to a
// staging area. Advisory upstream edges are still the only dependency source.
PhaseDownloaded
// PhaseVerified means downloaded JARs have been analyzed by local detectors
// and verified dependency facts replace the advisory upstream edges.
// Conflict detection has run successfully. This is the gate before apply.
PhaseVerified
// PhaseCommitted is reached only after a validated, conflict-free closure
// exists. File-system mutations are allowed only in this phase.
PhaseCommitted
)
// ConstraintInput is a single advisory or installed dependency edge fed into
// the constraint merge engine. It carries the requester identity for conflict
// provenance reporting.
type ConstraintInput struct {
// Requester is a human-readable label identifying which package or root
// requested this dependency (e.g. "root", "fabric-api@0.97.2+1.21.1").
Requester string
// Dependency is the dependency constraint being asserted by Requester.
Dependency types.Dependency
}
// InstalledConstraint represents a currently-installed package treated as a
// fixed constraint during recursive solving. The package must not be replaced
// automatically; it only contributes as a fixed version anchor.
type InstalledConstraint struct {
// Package is the installed package with its local installation path.
Package types.Package
// ConstraintInput is the fixed constraint edge derived from this installed
// package, used as an immutable lower-bound in the constraint engine.
ConstraintInput ConstraintInput
}
// CandidateNode is a package that has been admitted into the candidate graph.
// A node may be advisory (from upstream APIs) or verified (from local JARs).
// Advisory nodes MUST NOT trigger file-system mutations.
type CandidateNode struct {
// Package holds the package metadata. At PhaseCandidate, only Remote may be
// populated. At PhaseVerified, Local.Path will be set.
Package types.Package
// ProvenancePath records the chain of requesters that caused this node to
// enter the graph, starting from a root request. This is used by conflict
// reporting and reconcile diff output.
ProvenancePath []string
// Advisory is true when this node's dependency facts come from an upstream
// API. Advisory nodes' Dependencies should NOT be treated as authoritative.
Advisory bool
}
// ReconcileDiff records the difference between the advisory candidate graph
// and the verified local graph. It drives the reconcile loop that converges
// the transaction towards a stable validated closure.
type ReconcileDiff struct {
// Missing are packages present in the verified graph but absent from the
// current candidate graph. They must be added and downloaded before apply.
Missing []types.PackageId
// Extra are candidate nodes present only in the advisory upstream graph but
// not reachable from the verified closure. They must be dropped before apply.
Extra []types.PackageId
// Tightened are packages whose verified constraints are stricter than the
// advisory upstream constraints. The constraint engine must be re-run with
// the tighter constraints.
Tightened []ConstraintInput
}
// IsStable returns true when the diff has no pending changes, indicating the
// transaction has converged to a stable validated closure.
func (d ReconcileDiff) IsStable() bool {
return len(d.Missing) == 0 && len(d.Extra) == 0 && len(d.Tightened) == 0
}
// ApplyPlan is the final, immutable set of operations to execute during the
// committed phase. It is constructed only after reconcile has produced a stable
// validated closure. No file-system mutations happen before this struct exists.
type ApplyPlan struct {
// Install is the ordered list of packages to install in this transaction.
Install []types.Package
// Remove is the list of locally-installed packages proven unreachable from
// the validated closure. Only packages within this transaction's scope are
// eligible for removal.
Remove []types.Package
}
// RecursiveTransaction is the central state object for a recursive install
// operation. It is passed between all pipeline stages (candidate expansion,
// download, local verification, reconcile, apply) rather than loose slices.
//
// Value boundaries:
// - PURE fields (no side effects): Phase, Roots, InstalledConstraints,
// CandidateGraph, VerifiedGraph, ReconcileDiff, Apply
// - ADAPTER-OWNED fields (side-effect capable): Providers (network I/O),
// DownloadedArtifacts (filesystem), StagingDir (filesystem paths)
//
// Invariants enforced by this type's construction and phase transitions:
// - Phase starts at PhaseCandidate; it advances only via AdvanceTo.
// - ApplyPlan may only be set when Phase == PhaseVerified.
// - No caller may invoke file-system mutations while Phase < PhaseCommitted.
// - InstalledConstraints are immutable after transaction construction.
type RecursiveTransaction struct {
// Phase is the current lifecycle stage. See RecursivePhase constants.
// PURE: no side effects.
Phase RecursivePhase
// Roots are the top-level package IDs requested by the user.
// PURE: just data.
Roots []types.PackageId
// InstalledConstraints is a snapshot of currently-installed packages taken
// from probe.ServerInfo() at transaction start. These are fixed constraints
// that the solver must respect; they are never auto-replaced.
// PURE: computed once at transaction creation; no live filesystem/network.
InstalledConstraints []InstalledConstraint
// Providers are the upstream provider instances used for dependency fetches
// during candidate graph expansion.
// ADAPTER-OWNED: performs network I/O via upstream.Provider interface.
Providers []upstream.Provider
// CandidateGraph is the advisory dependency closure computed from upstream
// APIs and installed constraints. Keyed by PackageId.StringPlatformName().
// PURE: computed from Provider results (already fetched).
CandidateGraph map[string]CandidateNode
// DownloadedArtifacts maps PackageId.StringFull() to the local file path
// of the downloaded JAR. Populated during PhaseDownloaded.
// ADAPTER-OWNED: contains filesystem paths.
DownloadedArtifacts map[string]string
// VerifiedGraph is the authoritative dependency closure derived from local
// JAR detector analysis. Populated during PhaseVerified. Supersedes
// advisory facts in CandidateGraph for conflict and reconcile decisions.
// PURE: computed from local JAR analysis results.
VerifiedGraph map[string]CandidateNode
// ReconcileDiff is the latest diff between candidate and verified graphs.
// It is updated on each reconcile iteration and must be stable (IsStable())
// before the transaction may advance to PhaseVerified.
// PURE: computed from graph comparison; no side effects.
ReconcileDiff ReconcileDiff
// Apply holds the finalized operation set. It is set only once, immediately
// before advancing to PhaseCommitted.
// PURE: computed plan; actual filesystem mutations happen outside this type.
Apply *ApplyPlan
// StagingDir is the temporary directory where artifacts are downloaded
// during the download phase. Used for atomic move to target directory.
// ADAPTER-OWNED: filesystem path string.
StagingDir string
}
// NewRecursiveTransaction constructs a transaction in PhaseCandidate with the
// given root IDs and provider list. The installed constraints snapshot must
// be populated by the caller from probe.ServerInfo() before expansion begins.
func NewRecursiveTransaction(
roots []types.PackageId,
providers []upstream.Provider,
) *RecursiveTransaction {
return &RecursiveTransaction{
Phase: PhaseCandidate,
Roots: roots,
Providers: providers,
CandidateGraph: make(map[string]CandidateNode),
DownloadedArtifacts: make(map[string]string),
VerifiedGraph: make(map[string]CandidateNode),
}
}
// AdvanceTo advances the transaction phase to next. It panics if the requested
// phase is not a strict successor of the current phase, enforcing monotonic
// forward-only advancement.
func (tx *RecursiveTransaction) AdvanceTo(next RecursivePhase) {
if next != tx.Phase+1 {
panic("install: RecursiveTransaction phase advancement out of order")
}
if next == PhaseCommitted && tx.Apply == nil {
panic("install: cannot commit transaction without a validated ApplyPlan")
}
tx.Phase = next
}
// SetApplyPlan finalizes the apply plan. It may only be called when the
// transaction is in PhaseVerified. The transaction must then be advanced to
// PhaseCommitted before mutations begin.
func (tx *RecursiveTransaction) SetApplyPlan(plan ApplyPlan) {
if tx.Phase != PhaseVerified {
panic("install: ApplyPlan may only be set in PhaseVerified")
}
tx.Apply = &plan
}
package install
import (
"fmt"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/slugmap"
"github.com/mclucy/lucy/types"
)
// VerifyDownloadedArtifacts analyzes locally-downloaded artifacts and replaces
// advisory dependency facts with authoritative detector output.
func VerifyDownloadedArtifacts(tx *RecursiveTransaction) error {
if tx == nil {
return fmt.Errorf("install: nil recursive transaction")
}
allPackages := make([]types.Package, 0, len(tx.DownloadedArtifacts))
for _, path := range tx.DownloadedArtifacts {
packages := probe.DetectPackages(path)
if len(packages) == 0 {
return fmt.Errorf("install: artifact verification failed for %s: unreadable or corrupt", path)
}
allPackages = append(allPackages, packages...)
}
verified := make(map[string]CandidateNode, len(allPackages))
for _, pkg := range allPackages {
normalizeVerifiedPackage(&pkg)
if pkg.Dependencies != nil {
pkg.Dependencies.Authentic = true
}
verified[pkg.Id.StringPlatformName()] = CandidateNode{
Package: pkg,
ProvenancePath: []string{"verified"},
Advisory: false,
}
}
tx.VerifiedGraph = verified
tx.AdvanceTo(PhaseVerified)
return nil
}
func normalizeVerifiedPackage(pkg *types.Package) {
sm := slugmap.Default()
src := sourceForPlatform(pkg.Id.Platform)
if src == types.SourceUnknown {
return
}
if slug, ok := sm.GetLoose(src, string(pkg.Id.Name)); ok {
pkg.Id.Name = types.ProjectName(slug)
}
if pkg.Dependencies == nil {
return
}
for i, dep := range pkg.Dependencies.Value {
depSrc := sourceForPlatform(dep.Id.Platform)
if depSrc == types.SourceUnknown {
continue
}
if slug, ok := sm.GetLoose(depSrc, string(dep.Id.Name)); ok {
pkg.Dependencies.Value[i].Id.Name = types.ProjectName(slug)
}
}
}
func sourceForPlatform(p types.Platform) types.Source {
switch p {
case types.PlatformFabric, types.PlatformForge, types.PlatformNeoforge:
return types.SourceModrinth
case types.PlatformMCDR:
return types.SourceMCDR
default:
return types.SourceUnknown
}
}
package install
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path"
"strings"
"time"
"charm.land/huh/v2"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/probe"
tuiprogress "github.com/mclucy/lucy/tui/progress"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream/mojang"
"github.com/mclucy/lucy/util"
)
const minecraftEULAURL = "https://aka.ms/MinecraftEULA"
type mojangVersionDetail struct {
Downloads struct {
Server *struct {
Sha1 string `json:"sha1"`
Url string `json:"url"`
} `json:"server"`
} `json:"downloads"`
}
func installMinecraftServer(id types.PackageId) error {
manifest, err := fetchMojangVersionManifest()
if err != nil {
return err
}
versionId, versionURL, err := resolveMinecraftVersionEntry(
manifest,
id.Version,
)
if err != nil {
return err
}
detail, err := fetchMojangVersionDetail(versionURL)
if err != nil {
return err
}
if detail.Downloads.Server == nil {
return fmt.Errorf(
"minecraft version %s does not provide a dedicated server jar",
versionId,
)
}
workPath := probe.ServerInfo().WorkPath
if workPath == "" {
workPath = "."
}
if err := ensureMinecraftEULAAccepted(workPath); err != nil {
return err
}
serverJar, err := downloadMinecraftServerJar(
detail.Downloads.Server.Url,
detail.Downloads.Server.Sha1,
workPath,
)
if err != nil {
return fmt.Errorf("download minecraft server jar failed: %w", err)
}
defer func() { _ = serverJar.Close() }()
if err := addExecutePermission(serverJar); err != nil {
return err
}
return nil
}
func fetchMojangVersionManifest() (
*exttype.ApiMojangMinecraftVersionManifest,
error,
) {
data, err := util.CachedGetBytes(
mojang.VersionManifestURL,
util.BytesRequestOptions{Kind: cache.KindMetadata},
)
if err != nil {
return nil, fmt.Errorf("fetch mojang version manifest failed: %w", err)
}
manifest := &exttype.ApiMojangMinecraftVersionManifest{}
if err := json.Unmarshal(data, manifest); err != nil {
return nil, fmt.Errorf("parse mojang version manifest failed: %w", err)
}
if len(manifest.Versions) == 0 {
return nil, errors.New("mojang version manifest has no versions")
}
return manifest, nil
}
func resolveMinecraftVersionEntry(
manifest *exttype.ApiMojangMinecraftVersionManifest,
targetVersion types.RawVersion,
) (string, string, error) {
selected := targetVersion.String()
if targetVersion == "" || targetVersion.CanInfer() || targetVersion == types.VersionUnknown {
selected = manifest.Latest.Release
}
if strings.EqualFold(selected, "snapshot") {
selected = manifest.Latest.Snapshot
}
for i := range manifest.Versions {
if manifest.Versions[i].Id == selected {
return manifest.Versions[i].Id, manifest.Versions[i].Url, nil
}
}
return "", "", fmt.Errorf(
"minecraft version %s not found in mojang manifest",
targetVersion.String(),
)
}
func fetchMojangVersionDetail(versionURL string) (*mojangVersionDetail, error) {
data, err := util.CachedGetBytes(
versionURL,
util.BytesRequestOptions{
Kind: cache.KindMetadata,
TTL: 7 * 24 * time.Hour,
},
)
if err != nil {
return nil, fmt.Errorf(
"fetch minecraft version metadata failed: %w",
err,
)
}
detail := &mojangVersionDetail{}
if err := json.Unmarshal(data, detail); err != nil {
return nil, fmt.Errorf(
"parse minecraft version metadata failed: %w",
err,
)
}
return detail, nil
}
func downloadMinecraftServerJar(
url string,
expectedSha1 string,
dir string,
) (*os.File, error) {
tracker := tuiprogress.NewTracker("Downloading server")
defer func() {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = tuiprogress.WaitForShutdown(ctx)
}()
defer tracker.Close()
result, err := util.CachedDownload(
url, dir, util.DownloadOptions{
Kind: cache.KindArtifact,
ExpectedHash: expectedSha1,
HashAlgorithm: cache.HashSHA1,
WrapReader: tracker.ProxyReader,
OnResolvedFilename: func(name string) {
tracker.SetTitle(name)
},
OnCacheHit: func() {
tracker.Complete("cache hit")
time.Sleep(500 * time.Millisecond)
},
},
)
if err != nil {
return nil, err
}
return result.File, nil
}
func ensureMinecraftEULAAccepted(workPath string) error {
if hasAcceptedEULA(workPath) {
return nil
}
accepted := false
form := huh.NewForm(
huh.NewGroup(
huh.NewConfirm().
Title("Minecraft EULA consent required").
Description("To install and run the official server, you must agree to Mojang EULA: " + minecraftEULAURL).
Affirmative("I Agree").
Negative("Cancel").
Value(&accepted),
),
)
err := form.Run()
if err != nil {
return fmt.Errorf(
"unable to confirm EULA acceptance interactively after reviewing %s: %w",
minecraftEULAURL, err,
)
}
if !accepted {
return fmt.Errorf(
"minecraft server installation aborted: EULA was not accepted (%s)",
minecraftEULAURL,
)
}
return writeMinecraftEULAFile(workPath)
}
func hasAcceptedEULA(workPath string) bool {
data, err := os.ReadFile(path.Join(workPath, "eula.txt"))
if err != nil {
return false
}
return strings.Contains(strings.ToLower(string(data)), "eula=true")
}
func writeMinecraftEULAFile(workPath string) error {
content := strings.Join(
[]string{
"# By changing the setting below to TRUE you are indicating your agreement to the Minecraft EULA.",
"# " + minecraftEULAURL,
"eula=true",
"",
},
"\n",
)
if _, err := os.Stat(path.Join(workPath)); os.IsNotExist(err) {
err = os.MkdirAll(path.Join(workPath), 0o755)
if err != nil {
return err
}
}
err := os.WriteFile(path.Join(workPath, "eula.txt"), []byte(content), 0o644)
if err != nil {
return fmt.Errorf("write eula.txt failed: %w", err)
}
return nil
}
func addExecutePermission(file *os.File) error {
info, err := file.Stat()
if err != nil {
return fmt.Errorf("read server jar file mode failed: %w", err)
}
mode := info.Mode()
if mode&0o111 == 0o111 {
return nil
}
if err := file.Chmod(mode | 0o111); err != nil {
return fmt.Errorf(
"set execute permission on server jar failed: %w",
err,
)
}
return nil
}
// Package cipher provides runtime decryption for embedded secrets.
// Designed to deter casual extraction, not provide complete security.
//
// Usage:
// - Build time: go run ./internal/cipher -encrypt YOUR_API_KEY > .cipher_ciphertext
// - Runtime: key := cipher.Decode()
package cipher
import (
"encoding/hex"
"flag"
"fmt"
"io"
"os"
"golang.org/x/crypto/chacha20poly1305"
)
var (
// Key is the decryption key embedded in the binary.
// Generated at build time via go run ./cmd/cipher -keygen
Key = ""
keyBytes []byte
)
func init() {
if Key == "" {
return
}
var err error
keyBytes, err = hex.DecodeString(Key)
if err != nil {
panic(err)
}
}
// Ciphertext is the encrypted API key embedded in the binary.
// Generated at build time via go run ./internal/cipher -encrypt YOUR_KEY
var Ciphertext = ""
// Decode decrypts the embedded ciphertext and returns the plaintext.
func Decode() (string, error) {
if Ciphertext == "" {
return "", nil
}
data, err := hex.DecodeString(Ciphertext)
if err != nil {
return "", err
}
aead, err := chacha20poly1305.NewX(keyBytes)
if err != nil {
return "", err
}
if len(data) < aead.NonceSize() {
return "", err
}
nonce := data[:aead.NonceSize()]
ciphertext := data[aead.NonceSize():]
plaintext, err := aead.Open(nil, nonce, ciphertext, nil)
if err != nil {
return "", err
}
return string(plaintext), nil
}
// Encrypt encrypts plaintext using the embedded Key and returns hex ciphertext.
func Encrypt(plaintext string) (string, error) {
aead, err := chacha20poly1305.NewX(keyBytes)
if err != nil {
return "", err
}
var nonce [24]byte
f, err := os.Open("/dev/urandom")
if err != nil {
return "", err
}
io.ReadFull(f, nonce[:])
f.Close()
ciphertext := aead.Seal(nil, nonce[:], []byte(plaintext), nil)
result := make([]byte, 0, len(nonce)+len(ciphertext))
result = append(result, nonce[:]...)
result = append(result, ciphertext...)
return hex.EncodeToString(result), nil
}
func main() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
fmt.Fprintf(os.Stderr, "\nOptions:\n")
fmt.Fprintf(os.Stderr, " -keygen Generate new random key\n")
fmt.Fprintf(os.Stderr, " -encrypt KEY Encrypt KEY with Key\n")
flag.PrintDefaults()
}
keygen := flag.Bool("keygen", false, "generate new key")
encrypt := flag.String("encrypt", "", "encrypt API key")
flag.Parse()
switch {
case *keygen:
var data [32]byte
f, err := os.Open("/dev/urandom")
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
io.ReadFull(f, data[:])
f.Close()
fmt.Printf("cipher_key=%x\n", data)
os.Exit(0)
case *encrypt != "":
aead, err := chacha20poly1305.NewX(keyBytes)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
var nonce [24]byte
f, err := os.Open("/dev/urandom")
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
io.ReadFull(f, nonce[:])
f.Close()
ciphertext := aead.Seal(nil, nonce[:], []byte(*encrypt), nil)
fmt.Printf("cipher_ciphertext=%x\n", nonce)
fmt.Printf("%x\n", ciphertext)
default:
flag.Usage()
}
}
// Package logger provides structured logging with clear separation between
// log-file entries (operational diagnostics) and user-facing messages
// (displayed on stderr).
//
// # Function sets
//
// There are three tiers of logging functions plus a fatal shortcut:
//
// File-only Info Warn Error Debug → written to log file; echoed on console only in verboseWrite mode
// User-display ShowInfo ShowWarn ShowError → printed to stderr for the user; NOT persisted to log file
// Both ReportInfo ReportWarn ReportError → written to log file AND printed to stderr
// Fatal Fatal → logged + displayed + os.Exit(1)
//
// All writes to the log file are synchronous (no queue). A history buffer
// records every file-written entry so that [DumpHistory] can replay them to
// the console at program exit for post-mortem inspection.
package logger
import (
"fmt"
"os"
"time"
"github.com/mclucy/lucy/tools"
)
// Logging only functions
// Info logs an informational entry to the log file.
// In verboseWrite mode the entry is also printed to the console.
func Info(content any) {
e := &entry{Time: time.Now(), Level: LevelInfo, Content: content}
record(e)
writeToFile(e)
if verboseWrite && LevelInfo >= VerboseLevel {
writeToConsole(e)
}
}
// Warn logs a warning to the log file.
// In verboseWrite mode the entry is also printed to the console.
func Warn(content error) {
if content == nil {
return
}
e := &entry{Time: time.Now(), Level: LevelWarn, Content: content}
record(e)
writeToFile(e)
if verboseWrite && LevelWarn >= VerboseLevel {
writeToConsole(e)
}
}
// Error logs an error to the log file.
// In verboseWrite mode the entry is also printed to the console.
func Error(content error) {
if content == nil {
return
}
e := &entry{Time: time.Now(), Level: LevelError, Content: content}
record(e)
writeToFile(e)
if verboseWrite && LevelError >= VerboseLevel {
writeToConsole(e)
}
}
// Debug logs a debug entry to the log file. No-op unless debug mode is on.
// In verboseWrite mode (implied by debug) the entry is also printed to the console.
func Debug(content any) {
if !debug {
return
}
e := &entry{Time: time.Now(), Level: LevelDebug, Content: content}
record(e)
writeToFile(e)
if verboseWrite {
writeToConsole(e)
}
}
// User-display only functions
// ShowInfo displays an informational message to the user on stderr.
// The message is NOT written to the log file.
func ShowInfo(content any) {
writeToConsole(&entry{Time: time.Now(), Level: LevelInfo, Content: content})
}
// ShowWarn displays a warning to the user on stderr.
// The message is NOT written to the log file.
func ShowWarn(content error) {
writeToConsole(&entry{Time: time.Now(), Level: LevelWarn, Content: content})
}
// ShowError displays an error to the user on stderr.
// The message is NOT written to the log file.
func ShowError(content error) {
writeToConsole(
&entry{
Time: time.Now(), Level: LevelError, Content: content,
},
)
}
// Both file and user-display functions
// ReportInfo logs an informational message to the file AND displays it to
// the user on stderr.
func ReportInfo(content any) {
e := &entry{Time: time.Now(), Level: LevelInfo, Content: content}
record(e)
writeToFile(e)
writeToConsole(e)
}
// ReportWarn logs a warning to the file AND displays it to the user on
// stderr.
func ReportWarn(content error) {
if content == nil {
return
}
e := &entry{Time: time.Now(), Level: LevelWarn, Content: content}
record(e)
writeToFile(e)
writeToConsole(e)
}
// ReportError logs an error to the file AND displays it to the user on
// stderr.
func ReportError(content error) {
if content == nil {
return
}
e := &entry{Time: time.Now(), Level: LevelError, Content: content}
record(e)
writeToFile(e)
writeToConsole(e)
}
// Fatal logs a fatal error to the file, displays it to the user, then
// Fatal logs a fatal error to the file, displays it to the user, then
// calls os.Exit(1). Pending history is dumped before exit.
func Fatal(content error) {
e := &entry{Time: time.Now(), Level: LevelFatal, Content: content}
record(e)
writeToFile(e)
writeToConsole(e)
DumpHistory()
os.Exit(1)
}
// DumpHistory replays all recorded log entries to the console. This is
// intended to be called from a deferred function in main for post-mortem
// inspection in verboseWrite/debug mode.
//
// Entries already shown via verboseWrite mode will appear again — this is
// intentional so that the dump provides a complete, uninterrupted
// chronological view.
func DumpHistory() {
if !dumpHistory || len(history) == 0 {
return
}
_, _ = fmt.Fprintln(os.Stderr)
_, _ = fmt.Fprintln(
os.Stderr,
tools.Dim("── Log history ("+getLogFile().Name()+") ──"),
)
for _, e := range history {
timestamp := tools.Dim(e.Time.Format("15:04:05"))
_, _ = fmt.Fprintln(
os.Stderr,
timestamp,
e.Level.prefix(true),
e.Content,
)
}
}
package logger
import "sync"
// This file contains initialization and global state for the logger package.
func init() {
// TODO: This is only for development. In production, this variable will
// be read from a config file or environment variable.
VerboseLevel = LevelDebug
}
var (
debug bool // when true, Debug() entries are recorded
verboseWrite bool // when true, file-only entries are also printed to console
dumpHistory bool // when true, DumpHistory() will print the history to console
)
var (
mu sync.Mutex // write lock for history
history []*entry
)
// VerboseLevel controls which levels are echoed to the console in verboseWrite
// mode. Everything at or above this level is shown. Set to LevelDebug so
// that all entries are visible.
var VerboseLevel Level
// EnablePrintLogs enables echoing of file-only log entries to the console.
func EnablePrintLogs() { verboseWrite = true }
// EnableDebug enables Debug-level logging
func EnableDebug() { debug = true }
func EnableDumpHistory() { dumpHistory = true }
package logger
import (
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/mclucy/lucy/tools"
)
func writeToFile(e *entry) {
timestamp := e.Time.Format("2006-01-02 15:04:05")
_, _ = fmt.Fprintln(getLogFile(), timestamp, e.Level.prefix(false), e.Content)
}
func writeToConsole(e *entry) {
_, _ = fmt.Fprintln(os.Stderr, e.Level.prefix(true), e.Content)
}
func record(e *entry) {
mu.Lock()
history = append(history, e)
mu.Unlock()
}
var getLogFile = tools.Memoize(logFile)
// GetLogFile returns the log file handle, creating it on first call.
func GetLogFile() *os.File {
return getLogFile()
}
func logDir() string {
var logDir string
switch runtime.GOOS {
case "windows":
logDir = filepath.Join(os.Getenv("APPDATA"), "lucy", "logs")
case "darwin":
logDir = filepath.Join(os.Getenv("HOME"), "Library", "Logs", "lucy")
case "linux":
logDir = filepath.Join(
os.Getenv("HOME"),
".local",
"share",
"lucy",
"logs",
)
default:
logDir = "./logs"
}
return logDir
}
func logFile() *os.File {
logDir := logDir()
err := os.MkdirAll(logDir, 0o755)
if err != nil {
devNull, _ := os.Open(os.DevNull)
return devNull
}
logFilePath := filepath.Join(logDir, "lucy.log")
logFile, err := os.OpenFile(
logFilePath,
os.O_APPEND|os.O_CREATE|os.O_WRONLY,
0o755,
)
if err != nil {
println(err.Error())
devNull, _ := os.Open(os.DevNull)
return devNull
}
return logFile
}
package logger
import (
"time"
"github.com/mclucy/lucy/tools"
)
// Level represents the severity of a log entry.
// Levels are ordered from least to most severe: Debug < Info < Warn < Error < Fatal.
type Level uint8
const (
LevelDebug Level = iota
LevelInfo
LevelWarn
LevelError
LevelFatal
)
func (l Level) String() string {
switch l {
case LevelDebug:
return "DEBUG"
case LevelInfo:
return "INFO"
case LevelWarn:
return "WARN"
case LevelError:
return "ERROR"
case LevelFatal:
return "FATAL"
default:
return "UNKNOWN"
}
}
// levelColor maps each level to a styling function.
var levelColor = map[Level]func(any) string{
LevelDebug: tools.Green,
LevelInfo: tools.Cyan,
LevelWarn: tools.Yellow,
LevelError: tools.Red,
LevelFatal: tools.Red,
}
// prefix returns the bracketed level tag, optionally colored.
func (l Level) prefix(colored bool) string {
if colored {
return "[" + levelColor[l](l.String()) + "]"
}
return "[" + l.String() + "]"
}
// Entry represents a single log item with its timestamp, level, and content. This is used internally for recording history and is not exposed to users of the logger package.
type entry struct {
Time time.Time
Level Level
Content any
}
// TODO: REPLACE ALL io.ReadAll WITH STREAMING METHODS
package main
import (
"os"
"github.com/mclucy/lucy/cmd"
"github.com/mclucy/lucy/logger"
)
func main() {
defer logger.DumpHistory() // Whether DumpHistory actually does anything depends on the flag.
if err := cmd.Execute(); err != nil {
os.Exit(1)
}
}
package detector
import (
"archive/zip"
"fmt"
"os"
"path"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
// Executable analyzes a JAR file using all registered detectors and collects
// all executable evidence candidates in registration order.
func Executable(filePath string) *ExecutableCandidates {
file, err := os.Open(filePath)
if err != nil {
logger.Debug("Failed to open file: " + err.Error())
return nil
}
defer tools.CloseReader(file, logger.Warn)
stat, err := file.Stat()
if err != nil {
logger.Debug("Failed to stat file: " + err.Error())
return nil
}
zipReader, err := zip.NewReader(file, stat.Size())
if err != nil {
logger.Debug("Failed to read JAR file: " + err.Error())
return nil
}
bridgeMarkers := DetectBridgeMarkers(zipReader)
candidates := &ExecutableCandidates{
Candidates: make([]*ExecutableEvidence, 0),
}
detectors := getExecutableDetectors()
for _, detector := range detectors {
result, err := detector.Detect(filePath, zipReader, file)
if err != nil || result == nil {
continue
}
result.BridgeHints = mergeBridgeHints(result.BridgeHints, bridgeMarkers)
candidates.Candidates = append(candidates.Candidates, result)
}
return candidates
}
func mergeBridgeHints(existing []string, markers []BridgeMarker) []string {
if len(existing) == 0 && len(markers) == 0 {
return nil
}
merged := make([]string, 0, len(existing)+len(markers))
seen := make(map[string]struct{}, len(existing)+len(markers))
for _, hint := range existing {
if _, ok := seen[hint]; ok {
continue
}
seen[hint] = struct{}{}
merged = append(merged, hint)
}
for _, marker := range markers {
if _, ok := seen[marker.NodeID]; ok {
continue
}
seen[marker.NodeID] = struct{}{}
merged = append(merged, marker.NodeID)
}
return merged
}
// Packages analyzes a mod/plugin file and returns detected packages.
// Cross-ecosystem conflicts within a single JAR are resolved here per the
// precedence policy defined in probe/probe_topology_enrich.go. If detected
// packages span two incompatible ecosystem families (e.g. proxy + server), the
// result is nil — callers treat the file as unresolved rather than guessing.
func Packages(filePath string) (res []types.Package) {
file, err := os.Open(filePath)
if err != nil {
return nil
}
defer tools.CloseReader(file, logger.Warn)
stat, err := file.Stat()
if err != nil {
return nil
}
switch path.Ext(filePath) {
case ".jar", ".zip":
zipReader, err := zip.NewReader(file, stat.Size())
if err != nil {
return nil
}
for _, detector := range getModDetectors() {
result, err := detector.Detect(zipReader, file)
if err != nil || result == nil {
continue
}
res = append(res, result...)
}
if jarPlatformsConflict(res) {
logger.Warn(fmt.Errorf(
"ambiguous JAR %q: packages span incompatible ecosystems, treating as unresolved",
filePath,
))
return nil
}
res = aggregateBukkitFamilyPackages(res)
case ".pyz", ".mcdr":
McdrPlugin(filePath)
default:
return nil
}
return
}
// jarPlatformsConflict returns true when the detected packages span two or more
// ecosystem families that cannot coexist in a single deployable JAR.
//
// Ecosystem families (mirror the policy in probe/probe_topology_enrich.go):
//
// proxyFamily – velocity, bungeecord
// serverFamily – bukkit, paper, leaves, folia, spigot
// modFamily – fabric, forge, neoforge
//
// PlatformAny packages (e.g. Sponge plugins) are intentionally excluded from
// the conflict check because they do not signal a specific incompatible family.
func jarPlatformsConflict(pkgs []types.Package) bool {
if len(pkgs) == 0 {
return false
}
proxyPlatforms := map[types.Platform]struct{}{
types.Platform("velocity"): {},
types.Platform("bungeecord"): {},
}
serverPlatforms := map[types.Platform]struct{}{
types.Platform("bukkit"): {},
types.Platform("paper"): {},
types.Platform("leaves"): {},
types.Platform("folia"): {},
types.Platform("spigot"): {},
}
modPlatforms := map[types.Platform]struct{}{
types.PlatformFabric: {},
types.PlatformForge: {},
types.PlatformNeoforge: {},
}
var hasProxy, hasServer, hasMod bool
for _, pkg := range pkgs {
p := pkg.Id.Platform
if p == types.PlatformAny {
continue
}
if _, ok := proxyPlatforms[p]; ok {
hasProxy = true
}
if _, ok := serverPlatforms[p]; ok {
hasServer = true
}
if _, ok := modPlatforms[p]; ok {
hasMod = true
}
}
families := 0
if hasProxy {
families++
}
if hasServer {
families++
}
if hasMod {
families++
}
return families > 1
}
func McdrPlugin(filePath string) (res []types.Package) {
file, err := os.Open(filePath)
if err != nil {
return nil
}
defer tools.CloseReader(file, logger.Warn)
stat, err := file.Stat()
if err != nil {
return nil
}
zipReader, err := zip.NewReader(file, stat.Size())
if err != nil {
return nil
}
detector := getOtherPackageDetectors()["mcdr plugin"]
result, err := detector.Detect(zipReader, file)
if err != nil || result == nil {
return nil
}
res = append(res, result...)
return
}
// Environment checks for environment indicators (like MCDR)
func Environment(dir string) (env types.EnvironmentInfo) {
detectors := getEnvironmentDetectors()
for _, detector := range detectors {
detector.Detect(dir, &env)
}
return
}
package detector
import (
"archive/zip"
"bufio"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
)
var arclightJarNamePattern = regexp.MustCompile(
`^arclight-(?:forge|neoforge|fabric)-(\d+\.\d+(?:\.\d+)?)(?:[-.].*)?\.jar$`,
)
type arclightServerDetector struct{}
func (d *arclightServerDetector) Name() string {
return "arclight server"
}
// Sources:
// - https://arclight.izzel.io/
// - https://deepwiki.com/IzzelAliz/Arclight/1-overview
func (d *arclightServerDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
manifest, ok, err := readArchiveEntry(zipReader, "META-INF/MANIFEST.MF")
if err != nil {
return nil, err
}
if !ok {
return nil, nil
}
manifestSignals := parseArclightManifest(manifest)
if !manifestSignals.valid() {
return nil, nil
}
hasLaunchProps, err := archiveContains(zipReader, "arclight-server-launch.properties")
if err != nil {
return nil, err
}
hasCommonJar, err := archiveContains(zipReader, "common.jar")
if err != nil {
return nil, err
}
if !hasLaunchProps && !hasCommonJar {
return nil, nil
}
gameVersion := manifestSignals.gameVersion
if !hasConcreteVersion(gameVersion) {
gameVersion = parseArclightGameVersionFromPath(filePath)
}
return &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: gameVersion,
RuntimeIdentities: []types.PackageId{
{
Platform: types.PlatformAny,
Name: syntax.ToProjectName("arclight"),
Version: manifestSignals.loaderVersion,
},
{
Platform: types.PlatformMinecraft,
Name: syntax.ToProjectName("minecraft"),
Version: gameVersion,
},
},
Topology: &types.RuntimeTopology{
PrimaryNode: "arclight",
Nodes: []types.RuntimeNode{
{
ID: "arclight",
Role: types.RuntimeRoleHybrid,
Capabilities: []types.RuntimeCapability{
types.CapabilityForgeMods,
types.CapabilityBukkitPlugins,
},
},
},
},
}, nil
}
type arclightManifestSignals struct {
mainClass string
implementation string
mixinConnector string
loaderVersion types.RawVersion
gameVersion types.RawVersion
}
func (s arclightManifestSignals) valid() bool {
return s.mainClass == "io.izzel.arclight.server.Launcher" &&
s.implementation == "Arclight" &&
s.mixinConnector == "io.izzel.arclight.common.mod.ArclightConnector"
}
func parseArclightManifest(data []byte) arclightManifestSignals {
var signals arclightManifestSignals
scanner := bufio.NewScanner(strings.NewReader(string(data)))
for scanner.Scan() {
line := scanner.Text()
switch {
case strings.HasPrefix(line, "Main-Class: "):
signals.mainClass = strings.TrimSpace(strings.TrimPrefix(line, "Main-Class: "))
case strings.HasPrefix(line, "Implementation-Title: "):
signals.implementation = strings.TrimSpace(strings.TrimPrefix(line, "Implementation-Title: "))
case strings.HasPrefix(line, "Implementation-Version: "):
version := strings.TrimSpace(strings.TrimPrefix(line, "Implementation-Version: "))
signals.loaderVersion = types.RawVersion(version)
if parsedGameVersion := parseArclightGameVersionFromImplementation(version); hasConcreteVersion(parsedGameVersion) {
signals.gameVersion = parsedGameVersion
}
case strings.HasPrefix(line, "MixinConnector: "):
signals.mixinConnector = strings.TrimSpace(strings.TrimPrefix(line, "MixinConnector: "))
}
}
return signals
}
func parseArclightGameVersionFromImplementation(version string) types.RawVersion {
if !strings.HasPrefix(version, "arclight-") {
return types.VersionUnknown
}
trimmed := strings.TrimPrefix(version, "arclight-")
parts := strings.Split(trimmed, "-")
if len(parts) == 0 || !isMinecraftReleaseVersion(parts[0]) {
return types.VersionUnknown
}
return types.RawVersion(parts[0])
}
func parseArclightGameVersionFromPath(filePath string) types.RawVersion {
match := arclightJarNamePattern.FindStringSubmatch(filepath.Base(filePath))
if match == nil {
return types.VersionUnknown
}
return types.RawVersion(match[1])
}
func archiveContains(zipReader *zip.Reader, name string) (bool, error) {
_, ok, err := readArchiveEntry(zipReader, name)
if err != nil {
return false, err
}
return ok, nil
}
func init() {
registerExecutableDetector(&arclightServerDetector{})
}
package detector
import (
"archive/zip"
"strings"
)
type BridgeMarker struct {
NodeID string
Risk int
}
// DetectBridgeMarkers scans the JAR zip entries for known bridge/proxy/protocol-bridge indicators.
// Returns all markers found (may be empty). Never returns nil.
// This is evidence-only detection - presence of a marker means the JAR contains the software,
// not that compatibility is guaranteed.
func DetectBridgeMarkers(zipReader *zip.Reader) []BridgeMarker {
markers := make(map[string]BridgeMarker)
addMarker := func(nodeID string, risk int) {
if _, exists := markers[nodeID]; exists {
return
}
markers[nodeID] = BridgeMarker{NodeID: nodeID, Risk: risk}
}
for _, entry := range zipReader.File {
entryName := entry.Name
if strings.Contains(entryName, "dev/su5ed/sinytra/connector/") ||
strings.Contains(entryName, "META-INF/services/dev.su5ed.sinytra.connector.api.ConnectorPlugin") ||
strings.Contains(entryName, "connector.mixins.json") {
addMarker("connector", 3)
}
if strings.Contains(entryName, "xyz/bluspring/kilt/") {
addMarker("kilt", 3)
}
if strings.Contains(entryName, "com/velocitypowered/proxy/") {
addMarker("velocity", 0)
}
if strings.Contains(entryName, "net/md_5/bungee/") {
addMarker("bungeecord", 0)
}
if strings.Contains(entryName, "org/geysermc/geyser/platform/standalone/") {
addMarker("geyser_standalone", 1)
continue
}
if strings.Contains(entryName, "org/geysermc/geyser/") {
addMarker("geyser", 1)
}
}
if _, hasStandalone := markers["geyser_standalone"]; hasStandalone {
delete(markers, "geyser")
}
if len(markers) == 0 {
return []BridgeMarker{}
}
result := make([]BridgeMarker, 0, len(markers))
for _, nodeID := range []string{"connector", "kilt", "velocity", "bungeecord", "geyser_standalone", "geyser"} {
if marker, exists := markers[nodeID]; exists {
result = append(result, marker)
}
}
return result
}
package detector
import (
"archive/zip"
"bytes"
"encoding/json"
"io"
"os"
"strings"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
"gopkg.in/yaml.v3"
)
const (
bukkitPluginDescriptorPath = "plugin.yml"
paperPluginDescriptorPath = "paper-plugin.yml"
leavesPluginDescriptorPath = "leaves-plugin.json"
)
type bukkitDetector struct{}
type bukkitPluginDescriptor struct {
Name string `yaml:"name"`
Version string `yaml:"version"`
Main string `yaml:"main"`
Description string `yaml:"description"`
Author string `yaml:"author"`
Authors []string `yaml:"authors"`
Website string `yaml:"website"`
APIVersion string `yaml:"api-version"`
FoliaSupported bool `yaml:"folia-supported"`
PaperPluginLoad string `yaml:"paper-plugin-loader"`
}
type paperPluginDescriptor struct {
Name string `yaml:"name"`
Version string `yaml:"version"`
Main string `yaml:"main"`
Bootstrapper string `yaml:"bootstrapper"`
Loader string `yaml:"loader"`
Description string `yaml:"description"`
Author string `yaml:"author"`
Authors []string `yaml:"authors"`
Website string `yaml:"website"`
APIVersion string `yaml:"api-version"`
}
type leavesPluginDescriptor struct {
Name string `json:"name"`
Version string `json:"version"`
Main string `json:"main"`
Bootstrapper string `json:"bootstrapper"`
Loader string `json:"loader"`
Description string `json:"description"`
Author string `json:"author"`
Authors []string `json:"authors"`
Website string `json:"website"`
APIVersion string `json:"api-version"`
}
func newBukkitDetector() PackageDetector {
return &bukkitDetector{}
}
func (d *bukkitDetector) Name() string {
return "paper-family plugin"
}
func (d *bukkitDetector) Detect(
zipReader *zip.Reader,
fileHandle *os.File,
) ([]types.Package, error) {
if data, ok, err := readArchiveEntry(zipReader, leavesPluginDescriptorPath); err != nil {
return nil, err
} else if ok {
pkg, err := parseLeavesPluginDescriptor(data, fileHandle.Name())
if err != nil || pkg == nil {
return nil, err
}
return []types.Package{*pkg}, nil
}
if data, ok, err := readArchiveEntry(zipReader, paperPluginDescriptorPath); err != nil {
return nil, err
} else if ok {
pkg, err := parsePaperPluginDescriptor(data, fileHandle.Name())
if err != nil || pkg == nil {
return nil, err
}
return []types.Package{*pkg}, nil
}
if data, ok, err := readArchiveEntry(zipReader, bukkitPluginDescriptorPath); err != nil {
return nil, err
} else if ok {
pkg, err := parseBukkitPluginDescriptor(data, fileHandle.Name())
if err != nil || pkg == nil {
return nil, err
}
return []types.Package{*pkg}, nil
}
return nil, nil
}
func readArchiveEntry(
zipReader *zip.Reader,
name string,
) ([]byte, bool, error) {
for _, file := range zipReader.File {
if file.Name != name {
continue
}
r, err := file.Open()
if err != nil {
return nil, false, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, false, err
}
return data, true, nil
}
return nil, false, nil
}
func parseBukkitPluginDescriptor(
data []byte,
localPath string,
) (*types.Package, error) {
var descriptor bukkitPluginDescriptor
if err := yaml.NewDecoder(bytes.NewReader(data)).Decode(&descriptor); err != nil {
return nil, err
}
if strings.TrimSpace(descriptor.Name) == "" ||
strings.TrimSpace(descriptor.Version) == "" ||
strings.TrimSpace(descriptor.Main) == "" {
return nil, nil
}
supports := []types.Platform{
types.Platform("bukkit"),
types.Platform("spigot"),
types.Platform("paper"),
}
if descriptor.FoliaSupported {
supports = append(supports, types.Platform("folia"))
}
pkg := buildPaperFamilyPackage(
types.Platform("bukkit"),
descriptor.Name,
descriptor.Version,
localPath,
descriptor.Description,
collectDescriptorAuthors(descriptor.Author, descriptor.Authors),
descriptor.Website,
supports,
)
return &pkg, nil
}
func parsePaperPluginDescriptor(
data []byte,
localPath string,
) (*types.Package, error) {
var descriptor paperPluginDescriptor
if err := yaml.NewDecoder(bytes.NewReader(data)).Decode(&descriptor); err != nil {
return nil, err
}
if strings.TrimSpace(descriptor.Name) == "" ||
strings.TrimSpace(descriptor.Version) == "" ||
strings.TrimSpace(descriptor.Main) == "" ||
strings.TrimSpace(descriptor.APIVersion) == "" {
return nil, nil
}
pkg := buildPaperFamilyPackage(
types.Platform("paper"),
descriptor.Name,
descriptor.Version,
localPath,
descriptor.Description,
collectDescriptorAuthors(descriptor.Author, descriptor.Authors),
descriptor.Website,
[]types.Platform{types.Platform("paper")},
)
return &pkg, nil
}
func parseLeavesPluginDescriptor(
data []byte,
localPath string,
) (*types.Package, error) {
var descriptor leavesPluginDescriptor
if err := json.Unmarshal(data, &descriptor); err != nil {
return nil, err
}
if strings.TrimSpace(descriptor.Name) == "" ||
strings.TrimSpace(descriptor.Version) == "" ||
strings.TrimSpace(descriptor.Main) == "" {
return nil, nil
}
pkg := buildPaperFamilyPackage(
types.Platform("leaves"),
descriptor.Name,
descriptor.Version,
localPath,
descriptor.Description,
collectDescriptorAuthors(descriptor.Author, descriptor.Authors),
descriptor.Website,
[]types.Platform{types.Platform("leaves"), types.Platform("paper")},
)
return &pkg, nil
}
func buildPaperFamilyPackage(
platform types.Platform,
name string,
version string,
localPath string,
description string,
authors []types.Person,
website string,
supportedPlatforms []types.Platform,
) types.Package {
return types.Package{
Id: types.PackageId{
Platform: platform,
Name: syntax.ToProjectName(name),
Version: types.RawVersion(strings.TrimSpace(version)),
},
Local: &types.PackageInstallation{
Path: localPath,
},
Supports: &types.PlatformSupport{
Platforms: supportedPlatforms,
Authentic: true,
},
Information: &types.ProjectInformation{
Title: strings.TrimSpace(name),
Description: strings.TrimSpace(description),
Authors: authors,
Urls: buildDescriptorURLs(
strings.TrimSpace(website),
),
},
}
}
func collectDescriptorAuthors(author string, authors []string) []types.Person {
people := make([]types.Person, 0, len(authors)+1)
if trimmed := strings.TrimSpace(author); trimmed != "" {
people = append(people, types.Person{Name: trimmed})
}
for _, item := range authors {
if trimmed := strings.TrimSpace(item); trimmed != "" {
people = append(people, types.Person{Name: trimmed})
}
}
return people
}
func buildDescriptorURLs(website string) []types.Url {
if website == "" {
return nil
}
return []types.Url{{
Name: "Website",
Type: types.UrlHome,
Url: website,
}}
}
func init() {
registerModDetector(newBukkitDetector())
}
package detector
import (
"archive/zip"
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"slices"
"strings"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
)
const (
bukkitManifestPath = "META-INF/MANIFEST.MF"
bukkitManifestMainClass = "org.bukkit.craftbukkit.Main"
bukkitImplementationCraftBukkit = "CraftBukkit"
bukkitPaperClassPrefix = "io/papermc/paper/"
bukkitLegacyPaperClassPrefix = "com/destroystokyo/paper/"
bukkitSpigotClassPrefix = "org/spigotmc/"
bukkitNodePaperFork types.RuntimeNodeID = "paper-fork"
bukkitNodePaper types.RuntimeNodeID = "paper"
bukkitNodeSpigot types.RuntimeNodeID = "spigot"
bukkitNodeBukkit types.RuntimeNodeID = "bukkit"
bukkitNodeMinecraft types.RuntimeNodeID = "minecraft"
)
var bukkitVersionPrefixPattern = regexp.MustCompile(`^(\d+\.\d+(?:\.\d+)?)`)
type craftBukkitFamilyDetector struct{}
type bukkitManifestSignals struct {
mainClass string
specificationTitle string
specificationVendor string
implementationTitle string
implementationVendor string
implementationVer string
}
func (d *craftBukkitFamilyDetector) Name() string {
return "craftbukkit family executable"
}
func (d *craftBukkitFamilyDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
_ = fileHandle
judgment := newPaperJudgment()
manifest, ok, err := readBukkitExecutableManifest(filePath, zipReader)
if err != nil {
return nil, err
}
if !ok {
return nil, nil
}
signals := parseBukkitManifest(manifest)
metaMainClass, err := readBukkitExecutableSidecar(filePath, zipReader, paperMetaMainClassPath)
if err != nil {
return nil, err
}
reaperPatchProperties, err := readBukkitExecutablePatchProperties(filePath, zipReader)
if err != nil {
return nil, err
}
// Stage 1: Bukkit Confirmation
// CraftBukkit-derived servers consistently launch through
// org.bukkit.craftbukkit.Main, while Implementation-Title: CraftBukkit is the
// fallback family marker seen in repackaged jars that keep the canonical
// implementation branding. Extracted modern Paper fixtures keep the decisive
// Bukkit entrypoint in META-INF/main-class, while strict launcher-heavy Reaper
// and Youer fixtures only expose definitive Paper-fork proof via patch or
// manifest identity. Without one of these strict signals, we should not claim
// a Bukkit-lineage server executable.
judgment.bukkitConfirmed = signals.mainClass == bukkitManifestMainClass ||
strings.EqualFold(signals.implementationTitle, bukkitImplementationCraftBukkit) ||
metaMainClass == bukkitManifestMainClass ||
hasStrictReaperBukkitConfirmation(reaperPatchProperties) ||
hasStrictYouerBukkitConfirmation(signals)
if !judgment.bukkitConfirmed {
return nil, nil
}
judgment.addReason("bukkit confirmation satisfied")
// Stage 2: Observation Extraction
judgment.observations, err = extractPaperObservations(filePath, zipReader)
if err != nil {
return nil, err
}
// Stage 3: Family Reasoning
reasonPaperFamily(&judgment)
// Stage 4: Brand Attribution
attributePaperBrand(&judgment)
// Stage 5: Contradiction Resolution
resolvePaperContradictions(&judgment)
// Stage 6: Runtime Projection
gameVersion := judgment.observations.gameVersion
if !hasConcreteVersion(gameVersion) {
gameVersion = types.VersionUnknown
}
evidence := projectPaperJudgment(filePath, gameVersion, judgment)
if evidence == nil {
return nil, nil
}
return evidence, nil
}
func readBukkitExecutableManifest(
filePath string,
zipReader *zip.Reader,
) ([]byte, bool, error) {
if zipReader != nil {
return readArchiveEntry(zipReader, bukkitManifestPath)
}
manifestPath := filepath.Join(filePath, filepath.FromSlash(bukkitManifestPath))
data, err := os.ReadFile(manifestPath)
if err != nil {
if os.IsNotExist(err) {
return nil, false, nil
}
return nil, false, err
}
return data, true, nil
}
func reasonPaperFamily(judgment *paperJudgment) {
if judgment == nil {
return
}
switch {
case judgment.observations.hasPaperClasses:
judgment.familyResult = familyStrong
judgment.addReason("paper-family confirmed by bundled paper classes")
case hasModernPaperclipMetadataCluster(judgment.observations):
judgment.familyResult = familyStrong
judgment.addReason("paper-family confirmed by modern paperclip metadata cluster")
case hasConcreteVersion(judgment.observations.versionJSONID):
judgment.familyResult = familyWeak
judgment.addReason("paper-family remains likely from root version.json metadata")
case len(judgment.observations.patchProperties) > 0 || judgment.observations.hasPaperMCPatch:
judgment.familyResult = familyWeak
judgment.addReason("paper-family remains likely from legacy paper patch traces")
case judgment.observations.hasPaperclipNamespace || judgment.observations.hasLegacyPaperclipNamespace:
judgment.familyResult = familyWeak
judgment.addReason("paper-family remains likely from paperclip launcher namespaces")
default:
judgment.familyResult = familyMiss
judgment.addReason("paper-family evidence missing after bukkit confirmation; continuing to brand attribution")
}
}
func hasModernPaperclipMetadataCluster(obs paperObservations) bool {
return strings.TrimSpace(obs.downloadContext) != "" &&
len(obs.librariesListEntries) > 0 &&
strings.TrimSpace(obs.metaMainClass) != ""
}
func attributePaperBrand(judgment *paperJudgment) {
if judgment == nil {
return
}
brands := inferPaperObservationBrands(judgment.observations)
switch {
case len(brands) > 1:
judgment.brandResult = brandContradiction
judgment.brandName = strings.Join(brands, ",")
judgment.addReason(fmt.Sprintf("contradictory paper brands detected: %s", strings.Join(brands, ", ")))
case len(brands) == 1 && brands[0] == "paper":
judgment.brandResult = brandPaper
judgment.brandName = "paper"
judgment.addReason("brand attributed to official paper distribution")
case len(brands) == 1:
judgment.brandResult = brandFork
judgment.brandName = brands[0]
judgment.addReason("brand attributed to paper fork")
default:
judgment.brandResult = brandUnknown
judgment.addReason("no specific paper-family brand attribution available")
}
}
func inferPaperObservationBrands(obs paperObservations) []string {
brands := make([]string, 0, 8)
seen := make(map[string]struct{}, 8)
add := func(name string) {
normalized := normalizePaperBrandName(name)
if normalized == "" {
return
}
if _, ok := seen[normalized]; ok {
return
}
seen[normalized] = struct{}{}
brands = append(brands, normalized)
}
// Fixture citation: probe/internal/detector/testdata/paper_family/test_paper/paper/META-INF/libraries.list
if observationLinesContain(obs.librariesListEntries, paperLibraryPaperToken) {
add("paper")
}
// Fixture citation: probe/internal/detector/testdata/paper_family/test_folia/folia/META-INF/libraries.list
if observationLinesContain(obs.librariesListEntries, paperLibraryFoliaToken) {
add("folia")
}
// Fixture citation: probe/internal/detector/testdata/paper_family/test_divine/divine/META-INF/libraries.list
if observationLinesContain(obs.librariesListEntries, paperLibraryDivineToken) {
add("divine")
}
// Fixture citation: probe/internal/detector/testdata/paper_family/test_purpur/purpur/META-INF/libraries.list
if observationLinesContain(obs.librariesListEntries, paperLibraryPurpurToken) {
add("purpur")
}
// Fixture citation: probe/internal/detector/testdata/paper_family/test_leaf/leaf/META-INF/libraries.list and cn/dreeam/leaper/*
if observationLinesContain(obs.librariesListEntries, paperLibraryLeafToken) || obs.hasLeaperNamespace {
add("leaf")
}
// Fixture citation: probe/internal/detector/testdata/paper_family/test_leaves/leaves/META-INF/libraries.list, META-INF/build-info, META-INF/leavesclip-version
if observationLinesContain(obs.librariesListEntries, paperLibraryLeavesToken) || obs.hasLeavesclipNamespace || obs.leavesclipVersion != "" || strings.HasPrefix(obs.buildInfo, "Leaves\t") {
add("leaves")
}
// Fixture citation: probe/internal/detector/testdata/paper_family/test_reaper/reaper/patch.properties
if hasStrictReaperObservationBrand(obs) {
add("reaper")
}
// Fixture citation: probe/internal/detector/testdata/paper_family/test_youer/youer/META-INF/MANIFEST.MF
if obs.hasYouerNamespace ||
strings.EqualFold(obs.manifestSpecificationTitle, paperManifestYouerToken) ||
strings.EqualFold(obs.manifestImplementationTitle, paperManifestYouerToken) ||
strings.Contains(strings.ToLower(obs.manifestMainClass), paperMainClassYouerToken) {
add("youer")
}
slices.Sort(brands)
return brands
}
func readBukkitExecutableSidecar(
filePath string,
zipReader *zip.Reader,
entryPath string,
) (string, error) {
var (
data []byte
ok bool
err error
)
if zipReader != nil {
data, ok, err = readArchiveEntry(zipReader, entryPath)
} else {
data, ok, err = readDirectoryEntry(filePath, entryPath)
}
if err != nil || !ok {
return "", err
}
return strings.TrimSpace(string(data)), nil
}
func readBukkitExecutablePatchProperties(
filePath string,
zipReader *zip.Reader,
) (map[string]string, error) {
var (
data []byte
ok bool
err error
)
if zipReader != nil {
data, ok, err = readArchiveEntry(zipReader, paperPatchPropertiesPath)
} else {
data, ok, err = readDirectoryEntry(filePath, paperPatchPropertiesPath)
}
if err != nil || !ok {
return nil, err
}
return parsePaperPatchProperties(data), nil
}
func readDirectoryEntry(root string, entryPath string) ([]byte, bool, error) {
data, err := os.ReadFile(filepath.Join(root, filepath.FromSlash(entryPath)))
if err != nil {
if os.IsNotExist(err) {
return nil, false, nil
}
return nil, false, err
}
return data, true, nil
}
func hasStrictReaperBukkitConfirmation(properties map[string]string) bool {
return strings.Contains(properties["patch"], paperPatchReaperToken)
}
func hasStrictYouerBukkitConfirmation(signals bukkitManifestSignals) bool {
return strings.EqualFold(signals.specificationTitle, paperManifestYouerToken) ||
strings.EqualFold(signals.implementationTitle, paperManifestYouerToken) ||
strings.Contains(strings.ToLower(signals.mainClass), paperMainClassYouerToken)
}
func hasStrictReaperObservationBrand(obs paperObservations) bool {
return strings.Contains(obs.patchProperties["patch"], paperPatchReaperToken)
}
func observationLinesContain(lines []string, want string) bool {
for _, line := range lines {
if strings.Contains(line, want) {
return true
}
}
return false
}
func resolvePaperContradictions(judgment *paperJudgment) {
if judgment == nil {
return
}
if judgment.brandResult == brandContradiction {
judgment.familyResult = familyContradiction
judgment.contradictionState = fmt.Sprintf(
"brand contradiction after bukkit confirmation: %s",
nonEmptyPaperBrandName(judgment.brandName, "unknown"),
)
}
if judgment.familyResult == familyContradiction && judgment.contradictionState == "" {
judgment.contradictionState = "paper family contradiction after bukkit confirmation"
}
if judgment.contradictionState != "" {
judgment.addReason(judgment.contradictionState)
judgment.addReason("contradictory paper evidence resolved fail-closed to bukkit lineage")
}
}
func projectPaperJudgment(
filePath string,
gameVersion types.RawVersion,
judgment paperJudgment,
) *ExecutableEvidence {
if !judgment.bukkitConfirmed {
return nil
}
primaryNode := bukkitNodeBukkit
brand := "bukkit"
if judgment.contradictionState != "" {
judgment.addReason("runtime projection withheld paper promotion due to contradiction state")
} else {
switch judgment.brandResult {
case brandPaper:
primaryNode = bukkitNodePaper
brand = nonEmptyPaperBrandName(judgment.brandName, "paper")
case brandFork:
primaryNode = bukkitNodePaperFork
brand = nonEmptyPaperBrandName(judgment.brandName, "paper-fork")
case brandUnknown:
switch judgment.familyResult {
case familyStrong:
primaryNode = bukkitNodePaperFork
brand = "paper-fork"
judgment.addReason("strong paper-family evidence projected to generic paper-fork runtime")
case familyWeak:
primaryNode = bukkitNodeSpigot
brand = "spigot"
judgment.addReason("weak paper-family evidence projected to spigot runtime")
default:
judgment.addReason("family miss remains non-terminal but projects to baseline bukkit runtime")
}
case brandContradiction:
// contradictionState is set by resolvePaperContradictions; projection
// already withheld above via the contradictionState guard. This arm
// makes the switch exhaustive over all paperBrandResult values.
judgment.addReason("brand contradiction: projection falls back to bukkit baseline")
}
}
return &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: gameVersion,
RuntimeIdentities: []types.PackageId{
{
Platform: types.PlatformAny,
Name: syntax.ToProjectName(brand),
},
{
Platform: types.PlatformMinecraft,
Name: syntax.ToProjectName("minecraft"),
Version: gameVersion,
},
},
TopologySeed: buildBukkitExecutableTopologySeed(primaryNode),
Provenance: ExecutableDetectorProvenance{
DetectorName: (&craftBukkitFamilyDetector{}).Name(),
},
}
}
func normalizePaperBrandName(name string) string {
normalized := strings.ToLower(strings.TrimSpace(name))
if normalized == "" {
return ""
}
switch normalized {
case "craftbukkit", "bukkit":
return ""
default:
return normalized
}
}
func nonEmptyPaperBrandName(name string, fallback string) string {
if normalized := normalizePaperBrandName(name); normalized != "" {
return normalized
}
return fallback
}
func parseBukkitManifest(data []byte) bukkitManifestSignals {
var signals bukkitManifestSignals
scanner := bufio.NewScanner(strings.NewReader(string(data)))
for scanner.Scan() {
line := scanner.Text()
switch {
case strings.HasPrefix(line, "Main-Class: "):
signals.mainClass = strings.TrimSpace(
strings.TrimPrefix(
line,
"Main-Class: ",
),
)
case strings.HasPrefix(line, "Implementation-Title: "):
signals.implementationTitle = strings.TrimSpace(
strings.TrimPrefix(
line,
"Implementation-Title: ",
),
)
case strings.HasPrefix(line, "Specification-Title: "):
signals.specificationTitle = strings.TrimSpace(
strings.TrimPrefix(
line,
"Specification-Title: ",
),
)
case strings.HasPrefix(line, "Specification-Vendor: "):
signals.specificationVendor = strings.TrimSpace(
strings.TrimPrefix(
line,
"Specification-Vendor: ",
),
)
case strings.HasPrefix(line, "Implementation-Version: "):
signals.implementationVer = strings.TrimSpace(
strings.TrimPrefix(
line,
"Implementation-Version: ",
),
)
case strings.HasPrefix(line, "Implementation-Vendor: "):
signals.implementationVendor = strings.TrimSpace(
strings.TrimPrefix(
line,
"Implementation-Vendor: ",
),
)
}
}
return signals
}
func parseBukkitGameVersion(implementationVersion string) types.RawVersion {
match := bukkitVersionPrefixPattern.FindStringSubmatch(strings.TrimSpace(implementationVersion))
if len(match) < 2 || !isMinecraftReleaseVersion(match[1]) {
return types.VersionUnknown
}
return types.RawVersion(match[1])
}
func buildBukkitExecutableTopologySeed(
primaryNode types.RuntimeNodeID,
) *ExecutableTopologySeed {
nodes := []types.RuntimeNode{}
edges := []types.RuntimeEdge{}
addNode := func(id types.RuntimeNodeID) {
nodes = append(nodes, buildBukkitExecutableNode(id))
}
switch primaryNode {
case bukkitNodePaperFork:
addNode(bukkitNodePaperFork)
addNode(bukkitNodePaper)
addNode(bukkitNodeMinecraft)
edges = append(
edges,
buildBukkitImplementationEdge(
bukkitNodePaperFork,
bukkitNodePaper,
types.EdgeImplements,
),
buildBukkitImplementationEdge(
bukkitNodePaper,
bukkitNodeMinecraft,
types.EdgeModifies,
),
)
case bukkitNodePaper:
addNode(bukkitNodePaper)
addNode(bukkitNodeMinecraft)
edges = append(
edges,
buildBukkitImplementationEdge(
bukkitNodePaper,
bukkitNodeMinecraft,
types.EdgeModifies,
),
)
case bukkitNodeSpigot:
addNode(bukkitNodeSpigot)
addNode(bukkitNodeMinecraft)
edges = append(
edges,
buildBukkitImplementationEdge(
bukkitNodeSpigot,
bukkitNodeMinecraft,
types.EdgeModifies,
),
)
default:
addNode(bukkitNodeBukkit)
}
return &ExecutableTopologySeed{
PrimaryNode: primaryNode,
Nodes: nodes,
Edges: edges,
}
}
func buildBukkitExecutableNode(id types.RuntimeNodeID) types.RuntimeNode {
return types.RuntimeNode{
ID: id,
Role: types.RuntimeRolePluginCore,
Capabilities: []types.RuntimeCapability{types.CapabilityBukkitPlugins},
}
}
func buildBukkitImplementationEdge(
from types.RuntimeNodeID,
to types.RuntimeNodeID,
verb types.RuntimeEdgeVerb,
) types.RuntimeEdge {
return types.RuntimeEdge{
From: from,
To: to,
Verb: verb,
}
}
func init() {
registerExecutableDetector(&craftBukkitFamilyDetector{})
}
package detector
import (
"slices"
"github.com/mclucy/lucy/types"
)
type bukkitFamilyRank struct {
priority int
fallback types.Platform
}
func aggregateBukkitFamilyPackages(pkgs []types.Package) []types.Package {
if len(pkgs) < 2 {
return pkgs
}
bukkitIndexes := make([]int, 0, len(pkgs))
for i, pkg := range pkgs {
if isBukkitFamilyPlatform(pkg.Id.Platform) {
bukkitIndexes = append(bukkitIndexes, i)
}
}
if len(bukkitIndexes) < 2 {
return pkgs
}
bestIndex := bukkitIndexes[0]
bestRank := rankBukkitFamilyPlatform(pkgs[bestIndex].Id.Platform)
for _, idx := range bukkitIndexes[1:] {
rank := rankBukkitFamilyPlatform(pkgs[idx].Id.Platform)
if rank.priority > bestRank.priority {
bestIndex = idx
bestRank = rank
}
}
aggregated := pkgs[bestIndex]
aggregated.Supports = mergeBukkitFamilySupport(pkgs, bukkitIndexes, bestRank.fallback)
resolved := make([]types.Package, 0, len(pkgs)-len(bukkitIndexes)+1)
inserted := false
for i, pkg := range pkgs {
if !isIndexSelected(bukkitIndexes, i) {
resolved = append(resolved, pkg)
continue
}
if inserted {
continue
}
resolved = append(resolved, aggregated)
inserted = true
}
return resolved
}
func mergeBukkitFamilySupport(
pkgs []types.Package,
indexes []int,
fallback types.Platform,
) *types.PlatformSupport {
platforms := make([]types.Platform, 0, len(indexes)*2)
seen := make(map[types.Platform]struct{}, len(indexes)*2+1)
authentic := false
versions := make([]types.RawVersion, 0)
addPlatform := func(platform types.Platform) {
if !platform.Valid() {
return
}
if _, ok := seen[platform]; ok {
return
}
seen[platform] = struct{}{}
platforms = append(platforms, platform)
}
for _, idx := range indexes {
pkg := pkgs[idx]
addPlatform(pkg.Id.Platform)
if pkg.Supports == nil {
continue
}
authentic = authentic || pkg.Supports.Authentic
for _, platform := range pkg.Supports.Platforms {
addPlatform(platform)
}
versions = append(versions, pkg.Supports.MinecraftVersions...)
}
if len(platforms) == 0 {
addPlatform(fallback)
}
ordered := orderBukkitFamilyPlatforms(platforms)
if len(ordered) == 0 {
return nil
}
return &types.PlatformSupport{
MinecraftVersions: versions,
Platforms: ordered,
Authentic: authentic,
}
}
func orderBukkitFamilyPlatforms(platforms []types.Platform) []types.Platform {
ordered := make([]types.Platform, 0, len(platforms))
for _, candidate := range []types.Platform{
types.Platform("leaves"),
types.Platform("folia"),
types.Platform("paper"),
types.Platform("spigot"),
types.Platform("bukkit"),
} {
for _, platform := range platforms {
if platform == candidate {
ordered = append(ordered, platform)
break
}
}
}
return ordered
}
func rankBukkitFamilyPlatform(platform types.Platform) bukkitFamilyRank {
switch platform {
case types.Platform("leaves"), types.Platform("folia"):
return bukkitFamilyRank{priority: 4, fallback: types.Platform("paper")}
case types.Platform("paper"):
return bukkitFamilyRank{priority: 3, fallback: types.Platform("paper")}
case types.Platform("spigot"):
return bukkitFamilyRank{priority: 2, fallback: types.Platform("spigot")}
case types.Platform("bukkit"):
return bukkitFamilyRank{priority: 1, fallback: types.Platform("bukkit")}
default:
return bukkitFamilyRank{}
}
}
func isBukkitFamilyPlatform(platform types.Platform) bool {
_, ok := bukkitFamilyPlatforms[platform]
return ok
}
func isIndexSelected(indexes []int, candidate int) bool {
return slices.Contains(indexes, candidate)
}
var bukkitFamilyPlatforms = map[types.Platform]struct{}{
types.Platform("bukkit"): {},
types.Platform("spigot"): {},
types.Platform("paper"): {},
types.Platform("folia"): {},
types.Platform("leaves"): {},
}
package detector
import "github.com/mclucy/lucy/types"
// ExecutableDetectorProvenance records which detector produced an executable
// evidence candidate. This remains internal to probe/detector flow even though
// the type name is exported within the package surface for current refactor
// compatibility.
type ExecutableDetectorProvenance struct {
DetectorName string
}
// ExecutableTopologySeed captures detector-produced topology facts before final
// RuntimeInfo assembly and downstream topology enrichment choose the canonical
// runtime topology.
type ExecutableTopologySeed struct {
PrimaryNode types.RuntimeNodeID
Nodes []types.RuntimeNode
Edges []types.RuntimeEdge
}
// ExecutableEvidence is the internal detector output contract. It separates raw
// detection evidence from final public RuntimeInfo assembly while still keeping
// the current detector package compatible during the refactor.
type ExecutableEvidence struct {
PrimaryEntrance string
GameVersion types.RawVersion
Topology *types.RuntimeTopology
TopologySeed *ExecutableTopologySeed
RuntimeIdentities []types.PackageId
BridgeHints []string
Provenance ExecutableDetectorProvenance
}
// ExecutableCandidates groups all detector candidates for one executable so the
// aggregator can resolve ambiguity before materializing RuntimeInfo.
type ExecutableCandidates struct {
Candidates []*ExecutableEvidence
}
func executableEvidenceFromRuntimeInfo(runtime *types.RuntimeInfo) *ExecutableEvidence {
if runtime == nil || runtime == types.NoExecutable || runtime == types.UnknownExecutable {
return nil
}
evidence := &ExecutableEvidence{
PrimaryEntrance: runtime.PrimaryEntrance,
GameVersion: runtime.GameVersion,
Topology: runtime.Topology,
RuntimeIdentities: append([]types.PackageId(nil), runtime.RuntimeIdentities...),
BridgeHints: append([]string(nil), runtime.BridgeHints...),
}
if runtime.Topology != nil {
evidence.TopologySeed = &ExecutableTopologySeed{
PrimaryNode: runtime.Topology.PrimaryNode,
Nodes: append([]types.RuntimeNode(nil), runtime.Topology.Nodes...),
Edges: append([]types.RuntimeEdge(nil), runtime.Topology.Edges...),
}
}
return evidence
}
func (c *ExecutableCandidates) IsEmpty() bool {
return c == nil || len(c.Candidates) == 0
}
func (c *ExecutableCandidates) IsAmbiguous() bool {
return c != nil && len(c.Candidates) > 1
}
func (c *ExecutableCandidates) Single() *ExecutableEvidence {
if c == nil || len(c.Candidates) != 1 {
return nil
}
return c.Candidates[0]
}
var _ = executableEvidenceFromRuntimeInfo
package detector
import (
"archive/zip"
"bufio"
"encoding/json"
"io"
"os"
"strings"
"sync"
externaltype "github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream/slugresolve"
)
// fabricServerSingleFileDetector detects Fabric single-file servers
// This is one of the two methods of Fabric installation. One larger .jar file
// it placed at the root of the server directory. It handles the initialization
// and the downloading of the required libraries and minecraft version.
type fabricServerSingleFileDetector struct{}
func (d *fabricServerSingleFileDetector) Name() string {
return "fabric server"
}
func (d *fabricServerSingleFileDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (exec *ExecutableEvidence, err error) {
loaderVersion := types.VersionUnknown
gameVersion := types.VersionUnknown
for _, f := range zipReader.File {
if f.Name == "install.properties" {
r, err := f.Open()
if err != nil {
continue
}
defer tools.CloseReader(r, logger.Warn)
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
if after, found := strings.CutPrefix(
line,
"fabric-loader-version=",
); found {
loaderVersion = types.RawVersion(after)
} else if after, found := strings.CutPrefix(
line,
"game-version=",
); found {
gameVersion = types.RawVersion(after)
}
}
if loaderVersion == types.VersionUnknown || gameVersion == types.VersionUnknown {
continue
}
break
}
}
if loaderVersion == types.VersionUnknown || gameVersion == types.VersionUnknown {
return nil, nil
}
exec = &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: gameVersion,
RuntimeIdentities: []types.PackageId{
{
Platform: types.PlatformFabric,
Name: "fabric",
Version: loaderVersion,
},
{
Platform: types.PlatformMinecraft,
Name: "minecraft",
Version: gameVersion,
},
},
Topology: &types.RuntimeTopology{
PrimaryNode: "fabric",
Nodes: []types.RuntimeNode{
{
ID: "fabric",
Role: types.RuntimeRoleModLoader,
Capabilities: []types.RuntimeCapability{types.CapabilityFabricMods},
},
},
},
}
return exec, nil
}
// fabricServerLauncherDetector detects Fabric server launchers
// This is one of the two methods of Fabric installation. A lightweight
// launcher .jar file is placed at the root of the server directory. It only
// records the paths to the required libraries.
//
// The detection process is rather complicated.
type fabricServerLauncherDetector struct{}
func (d *fabricServerLauncherDetector) Name() string {
return "fabric server"
}
func (d *fabricServerLauncherDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (exec *ExecutableEvidence, err error) {
var valid bool
for _, f := range zipReader.File {
if f.Name == "fabric-server-launch.properties" {
r, err := f.Open()
if err != nil {
continue
}
defer tools.CloseReader(r, logger.Warn)
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
if line == "launch.mainClass=net.fabricmc.loader.impl.launch.knot.KnotServer" {
valid = true
break
}
}
}
}
if !valid {
return nil, nil
}
loaderVersion := types.VersionUnknown
gameVersion := types.VersionUnknown
for _, f := range zipReader.File {
if f.Name == "META-INF/MANIFEST.MF" {
r, err := f.Open()
if err != nil {
continue
}
defer tools.CloseReader(r, logger.Warn)
var classPaths []string
s := bufio.NewScanner(r)
for s.Scan() {
line := s.Text()
if after, found := strings.CutPrefix(
line,
"Class-Path: ",
); found {
var classPathsBuilder strings.Builder
classPathsBuilder.WriteString(after)
for s.Scan() && !strings.Contains(s.Text(), ":") {
line := s.Text()
line = strings.TrimSpace(line)
classPathsBuilder.WriteString(line)
}
classPaths = strings.Split(classPathsBuilder.String(), " ")
}
}
// Here we just parse the paths to find the versions.
//
// Although been seemingly unreliable, this is a justified method.
// The lightweight launcher .jar's idea is to not restrictively
// specify anything but only the paths to the libraries(classes).
// Besides, it is the user's responsibility to ensure the presence
// of the required libraries.
for _, path := range classPaths {
if after, found := strings.CutPrefix(
path,
"libraries/net/fabricmc/fabric-loader/",
); found {
loaderVersion = types.RawVersion(
strings.Split(after, "/")[0],
)
} else if after, found := strings.CutPrefix(
path,
"libraries/net/fabricmc/intermediary/",
); found {
gameVersion = types.RawVersion(
strings.Split(after, "/")[0],
)
}
}
if loaderVersion == types.VersionUnknown || gameVersion == types.VersionUnknown {
continue
}
exec = &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: gameVersion,
RuntimeIdentities: []types.PackageId{
{
Platform: types.PlatformFabric,
Name: "fabric",
Version: loaderVersion,
},
{
Platform: types.PlatformMinecraft,
Name: "minecraft",
Version: gameVersion,
},
},
Topology: &types.RuntimeTopology{
PrimaryNode: "fabric",
Nodes: []types.RuntimeNode{
{
ID: "fabric",
Role: types.RuntimeRoleModLoader,
Capabilities: []types.RuntimeCapability{types.CapabilityFabricMods},
},
},
},
}
return exec, nil
}
}
return nil, nil
}
// fabricModDetector detects Fabric mods in JAR files
type fabricModDetector struct{}
func (d *fabricModDetector) Name() string {
return "fabric mod"
}
func (d *fabricModDetector) Detect(
zipReader *zip.Reader,
fileHandle *os.File,
) (packages []types.Package, err error) {
var wg sync.WaitGroup
for _, f := range zipReader.File {
if f.Name == "fabric.mod.json" {
r, err := f.Open()
if err != nil {
return nil, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
modInfo := &externaltype.FileFabricModIdentifier{}
err = json.Unmarshal(data, modInfo)
if err != nil {
return nil, err
}
pkg := translateFabricMod(modInfo, fileHandle.Name())
packages = append(packages, pkg)
// Pre-populate slugmap for both sources using metadata URLs and file hash
var metaURLs []string
for _, key := range []string{"homepage", "sources", "issues"} {
if u := modInfo.Contact[key]; u != "" {
metaURLs = append(metaURLs, u)
}
}
wg.Add(2)
go func(name, path string, urls []string) {
defer wg.Done()
slugresolve.ResolveSlug(types.SourceModrinth, name, path, urls)
}(string(pkg.Id.Name), fileHandle.Name(), metaURLs)
go func(name, path string, urls []string) {
defer wg.Done()
slugresolve.ResolveSlug(types.SourceCurseForge, name, path, urls)
}(string(pkg.Id.Name), fileHandle.Name(), metaURLs)
}
}
wg.Wait()
return packages, nil
}
func init() {
registerExecutableDetector(&fabricServerSingleFileDetector{})
registerExecutableDetector(&fabricServerLauncherDetector{})
registerModDetector(&fabricModDetector{})
}
package detector
import (
"strings"
"github.com/mclucy/lucy/dependency"
externaltype "github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
// parseFabricVersionRanges parses a Fabric VersionRange value where each item
// in the outer slice is an OR alternative.
func parseFabricVersionRanges(
ranges tools.SingleOrSlice[string],
) types.VersionConstraintExpression {
return dependency.ParseRanges(
[]string(ranges),
dependency.InferRangeDialect(types.PlatformFabric),
types.Semver,
)
}
func translateFabricMod(
modInfo *externaltype.FileFabricModIdentifier,
localPath string,
) types.Package {
pkg := types.Package{
Id: types.PackageId{
Platform: types.PlatformFabric,
Name: syntax.ToProjectName(modInfo.Id),
Version: types.RawVersion(modInfo.Version),
},
Local: &types.PackageInstallation{
Path: localPath,
},
Dependencies: &types.PackageDependencies{},
Information: &types.ProjectInformation{},
}
embeddedNames := fabricEmbeddedModNames(modInfo)
pkg.Dependencies.Value = append(pkg.Dependencies.Value,
translateFabricDependencyMap(modInfo.Depends, true, false, embeddedNames)...,
)
pkg.Dependencies.Value = append(pkg.Dependencies.Value,
translateFabricDependencyMap(modInfo.Recommends, false, false, embeddedNames)...,
)
pkg.Dependencies.Value = append(pkg.Dependencies.Value,
translateFabricDependencyMap(modInfo.Suggests, false, false, embeddedNames)...,
)
pkg.Dependencies.Value = append(pkg.Dependencies.Value,
translateFabricDependencyMap(modInfo.Breaks, false, true, embeddedNames)...,
)
pkg.Dependencies.Value = append(pkg.Dependencies.Value,
translateFabricDependencyMap(modInfo.Conflicts, false, true, embeddedNames)...,
)
pkg.Information = &types.ProjectInformation{
Title: modInfo.Name,
Description: modInfo.Description,
License: modInfo.License,
Authors: func() []types.Person {
authors := make([]types.Person, len(modInfo.Authors))
for i, author := range modInfo.Authors {
authors[i] = types.Person{Name: string(author)}
}
return authors
}(),
}
return pkg
}
func translateFabricDependencyMap(
deps map[string]tools.SingleOrSlice[string],
mandatory bool,
inverse bool,
embeddedNames map[string]struct{},
) []types.Dependency {
translated := make([]types.Dependency, 0, len(deps))
for k, v := range deps {
name := syntax.ToProjectName(k)
_, embedded := embeddedNames[string(name)]
dep := types.Dependency{
Id: types.PackageId{
Platform: types.PlatformFabric,
Name: name,
},
Constraint: parseFabricVersionRanges(v),
Mandatory: mandatory,
Embedded: embedded,
}
if inverse {
dep.Constraint.Inverse()
}
translated = append(translated, dep)
}
return translated
}
func fabricEmbeddedModNames(modInfo *externaltype.FileFabricModIdentifier) map[string]struct{} {
depNames := make([]string, 0, len(modInfo.Depends))
for k := range modInfo.Depends {
depNames = append(depNames, k)
}
names := make(map[string]struct{}, len(modInfo.Jars))
for _, jar := range modInfo.Jars {
base := jar.File
if idx := strings.LastIndex(base, "/"); idx >= 0 {
base = base[idx+1:]
}
base = strings.TrimSuffix(base, ".jar")
for _, dep := range depNames {
if base == dep || strings.HasPrefix(base, dep+"-") {
names[dep] = struct{}{}
break
}
}
}
return names
}
package detector
import (
"archive/zip"
"bufio"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream/slugresolve"
"github.com/pelletier/go-toml"
)
type forgeLegacyDetector struct{}
func (d *forgeLegacyDetector) Name() string {
return "forge legacy server"
}
// Sources:
// - https://docs.minecraftforge.net/en/1.16.x/gettingstarted/
// - https://forums.minecraftforge.net/topic/102544-forge-370-minecraft-1171/
func (d *forgeLegacyDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
base := filepath.Base(filePath)
if !strings.Contains(base, "forge-") || !strings.Contains(base, "universal") {
return nil, nil
}
forgeVersion, gameVersion := parseForgeManifest(zipReader)
if !hasConcreteVersion(forgeVersion) {
return nil, nil
}
if !hasConcreteVersion(gameVersion) {
gameVersion, _, _ = parseForgeVersionTupleFromPath(filePath)
}
if !hasConcreteVersion(gameVersion) {
return nil, nil
}
return buildForgeRuntimeInfo(filePath, gameVersion, forgeVersion), nil
}
type forgeModernDetector struct{}
func (d *forgeModernDetector) Name() string {
return "forge modern server"
}
// Sources:
// - https://docs.minecraftforge.net/en/latest/gettingstarted/server/
// - https://forums.minecraftforge.net/topic/102544-forge-370-minecraft-1171/
func (d *forgeModernDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
gameVersion, forgeVersion, ok := parseForgeVersionTupleFromPath(filePath)
if !ok || compareForgeMajor(forgeVersion, 61) >= 0 {
return nil, nil
}
base := filepath.Base(filePath)
if !strings.HasPrefix(base, "forge-") {
return nil, nil
}
if !strings.Contains(base, "-server") && !strings.Contains(base, "-universal") {
return nil, nil
}
if !forgeHasSibling(filePath, "unix_args.txt", "win_args.txt") {
return nil, nil
}
return buildForgeRuntimeInfo(filePath, gameVersion, forgeVersion), nil
}
type forgeLatestDetector struct{}
func (d *forgeLatestDetector) Name() string {
return "forge latest server"
}
// Sources:
// - https://docs.minecraftforge.net/en/latest/gettingstarted/server/
// - https://forums.minecraftforge.net/topic/154652-how-to-install-forge-6110-for-1211-server/
func (d *forgeLatestDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
gameVersion, forgeVersion, ok := parseForgeVersionTupleFromPath(filePath)
if !ok || compareForgeMajor(forgeVersion, 61) < 0 {
return nil, nil
}
base := filepath.Base(filePath)
if !strings.HasPrefix(base, "forge-") || !strings.Contains(base, "-server") {
return nil, nil
}
if !forgeHasSibling(
filePath,
"unix_args.txt",
"win_args.txt",
strings.Replace(base, "-server.jar", "-shim.jar", 1),
strings.Replace(base, "-server.jar", "-universal.jar", 1),
) {
return nil, nil
}
return buildForgeRuntimeInfo(filePath, gameVersion, forgeVersion), nil
}
// forgeServerDetector detects Forge servers via manifest metadata fallback.
type forgeServerDetector struct{}
func (d *forgeServerDetector) Name() string {
return "forge server"
}
// Sources:
// - https://docs.minecraftforge.net/en/latest/gettingstarted/server/
// - https://docs.minecraftforge.net/en/1.16.x/gettingstarted/
func (d *forgeServerDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
forgeVersion, gameVersion := parseForgeManifest(zipReader)
if !hasConcreteVersion(forgeVersion) {
return nil, nil
}
if !hasConcreteVersion(gameVersion) {
parsedGameVersion, _, ok := parseForgeVersionTupleFromPath(filePath)
if ok {
gameVersion = parsedGameVersion
}
}
if !hasConcreteVersion(gameVersion) {
return nil, nil
}
return buildForgeRuntimeInfo(filePath, gameVersion, forgeVersion), nil
}
func parseForgeManifest(
zipReader *zip.Reader,
) (forgeVersion types.RawVersion, gameVersion types.RawVersion) {
for _, f := range zipReader.File {
if f.Name != "META-INF/MANIFEST.MF" {
continue
}
r, err := f.Open()
if err != nil {
continue
}
defer tools.CloseReader(r, logger.Warn)
s := bufio.NewScanner(r)
for s.Scan() {
line := s.Text()
if line == "Implementation-Title: net.minecraftforge" {
if !s.Scan() {
continue
}
if after, found := strings.CutPrefix(
s.Text(),
"Implementation-Version: ",
); found {
forgeVersion = types.RawVersion(after)
}
}
if strings.HasPrefix(line, "Specification-Version: ") {
if after, found := strings.CutPrefix(
line,
"Specification-Version: ",
); found && isMinecraftReleaseVersion(after) {
gameVersion = types.RawVersion(after)
}
}
}
break
}
return forgeVersion, gameVersion
}
func isMinecraftReleaseVersion(version string) bool {
if !strings.HasPrefix(version, "1.") {
return false
}
for _, r := range version {
if (r < '0' || r > '9') && r != '.' {
return false
}
}
return true
}
func compareForgeMajor(version types.RawVersion, target int) int {
major := strings.Split(string(version), ".")[0]
switch major {
case "":
return -1
case "61":
if target == 61 {
return 0
}
if target < 61 {
return 1
}
return -1
default:
if major > "61" {
if target <= 61 {
return 1
}
return -1
}
return -1
}
}
// getForgeModVersion extracts the version from a Forge JAR's manifest
// when the mod version is set to `${file.jarVersion}`.
func getForgeModVersion(zipReader *zip.Reader) types.RawVersion {
var r io.ReadCloser
var err error
for _, f := range zipReader.File {
if f.Name == "META-INF/MANIFEST.MF" {
r, err = f.Open()
if err != nil {
return types.VersionUnknown
}
defer tools.CloseReader(r, logger.Warn)
break
}
}
if r == nil {
return types.VersionUnknown
}
data, err := io.ReadAll(r)
if err != nil {
return types.VersionUnknown
}
manifest := string(data)
const versionField = "Implementation-Version: "
idx := strings.Index(manifest, versionField)
if idx == -1 {
return types.VersionUnknown
}
i := idx + len(versionField)
v := manifest[i:]
v = strings.Split(v, "\r")[0]
v = strings.Split(v, "\n")[0]
return types.RawVersion(v)
}
func forgeHasSibling(filePath string, siblings ...string) bool {
dir := filepath.Dir(filePath)
for _, sibling := range siblings {
if _, err := os.Stat(filepath.Join(dir, sibling)); err == nil {
return true
}
}
return false
}
// forgeModDetector detects new Forge mods (1.13+)
type forgeModDetector struct{}
func (d *forgeModDetector) Name() string {
return "forge mod"
}
func (d *forgeModDetector) Detect(
zipReader *zip.Reader,
fileHandle *os.File,
) (packages []types.Package, err error) {
var wg sync.WaitGroup
for _, f := range zipReader.File {
if f.Name == "META-INF/mods.toml" {
r, err := f.Open()
if err != nil {
return nil, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
modIdentifier := &exttype.FileModLoaderIdentifier{}
err = toml.Unmarshal(data, modIdentifier)
if err != nil {
return nil, err
}
for _, mod := range modIdentifier.Mods {
if mod.ModID == "forge" {
continue
}
version := types.RawVersion(mod.Version)
if version == "${file.jarVersion}" {
version = getForgeModVersion(zipReader)
}
rawDeps := modIdentifier.Dependencies[mod.ModID]
depSpecs := make([]modLoaderDependencySpec, 0, len(rawDeps))
for _, dep := range rawDeps {
depSpecs = append(depSpecs, modLoaderDependencySpec{
modID: dep.ModID,
mandatory: dep.Mandatory,
versionRange: dep.VersionRange,
})
}
p := translateModLoaderPackage(
types.PlatformForge,
fileHandle.Name(),
mod.ModID,
version,
depSpecs,
modIdentifier.License,
mod.DisplayName,
mod.Description,
mod.Authors,
mod.DisplayURL,
modIdentifier.IssueTrackerURL,
)
packages = append(packages, p)
var metaURLs []string
for _, u := range p.Information.Urls {
if u.Url != "" {
metaURLs = append(metaURLs, u.Url)
}
}
wg.Add(2)
go func(name, path string, urls []string) {
defer wg.Done()
slugresolve.ResolveSlug(types.SourceModrinth, name, path, urls)
}(string(p.Id.Name), fileHandle.Name(), metaURLs)
go func(name, path string, urls []string) {
defer wg.Done()
slugresolve.ResolveSlug(types.SourceCurseForge, name, path, urls)
}(string(p.Id.Name), fileHandle.Name(), metaURLs)
}
}
}
wg.Wait()
return packages, nil
}
func init() {
registerExecutableDetector(&forgeLegacyDetector{})
registerExecutableDetector(&forgeModernDetector{})
registerExecutableDetector(&forgeLatestDetector{})
registerExecutableDetector(&forgeServerDetector{})
registerModDetector(&forgeModDetector{})
}
package detector
import (
"path/filepath"
"regexp"
"strings"
"github.com/mclucy/lucy/dependency"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
)
type modLoaderDependencySpec struct {
modID string
mandatory bool
versionRange string
}
var forgeRuntimeVersionDirPattern = regexp.MustCompile(
`^(\d+\.\d+(?:\.\d+)?)-(\d+(?:\.\d+)+)$`,
)
var forgeJarNameVersionPattern = regexp.MustCompile(
`^forge-(\d+\.\d+(?:\.\d+)?)-(\d+(?:\.\d+)+)(?:-[a-z]+)?\.jar$`,
)
// parseModLoaderMavenVersionRange parses Forge dependency version ranges.
//
// References:
// - https://docs.minecraftforge.net/en/latest/gettingstarted/modfiles/
// - https://maven.apache.org/enforcer/enforcer-rules/versionRanges.html
func parseModLoaderMavenVersionRange(interval string) [][]types.VersionConstraint {
return dependency.ParseRange(
interval,
dependency.InferRangeDialect(types.PlatformForge),
types.Semver,
)
}
func translateModLoaderPackage(
platform types.Platform,
localPath string,
modID string,
version types.RawVersion,
deps []modLoaderDependencySpec,
license string,
displayName string,
description string,
authors string,
displayURL string,
issueTrackerURL string,
) types.Package {
pkg := types.Package{
Id: types.PackageId{
Platform: platform,
Name: syntax.ToProjectName(modID),
Version: version,
},
Local: &types.PackageInstallation{
Path: localPath,
},
Dependencies: &types.PackageDependencies{},
Information: &types.ProjectInformation{},
}
pkg.Dependencies.Value = append(pkg.Dependencies.Value,
translateModLoaderDependencies(platform, deps)...,
)
pkg.Information = &types.ProjectInformation{
Title: displayName,
Brief: description,
Authors: []types.Person{{Name: authors}},
License: license,
Urls: []types.Url{
{
Name: "URL",
Type: types.UrlHome,
Url: displayURL,
},
{
Name: "Issue Tracker",
Type: types.UrlIssues,
Url: issueTrackerURL,
},
},
}
return pkg
}
func translateModLoaderDependencies(
platform types.Platform,
deps []modLoaderDependencySpec,
) []types.Dependency {
translated := make([]types.Dependency, 0, len(deps))
for _, dep := range deps {
translated = append(translated, types.Dependency{
Id: types.PackageId{
Platform: platform,
Name: syntax.ToProjectName(dep.modID),
},
Constraint: parseModLoaderMavenVersionRange(dep.versionRange),
Mandatory: dep.mandatory,
})
}
return translated
}
func parseForgeVersionTupleFromPath(
filePath string,
) (gameVersion types.RawVersion, forgeVersion types.RawVersion, ok bool) {
parts := strings.Split(filepath.ToSlash(filePath), "/")
for i := 0; i < len(parts)-1; i++ {
if parts[i] != "forge" {
continue
}
match := forgeRuntimeVersionDirPattern.FindStringSubmatch(parts[i+1])
if match == nil {
continue
}
return types.RawVersion(match[1]), types.RawVersion(match[2]), true
}
if match := forgeJarNameVersionPattern.FindStringSubmatch(filepath.Base(filePath)); match != nil {
return types.RawVersion(match[1]), types.RawVersion(match[2]), true
}
return types.VersionUnknown, types.VersionUnknown, false
}
func hasConcreteVersion(version types.RawVersion) bool {
return version != "" && !version.IsInvalid() && !version.CanInfer()
}
func buildForgeRuntimeInfo(
filePath string,
gameVersion types.RawVersion,
forgeVersion types.RawVersion,
) *ExecutableEvidence {
return &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: gameVersion,
RuntimeIdentities: []types.PackageId{
{
Platform: types.PlatformForge,
Name: "forge",
Version: forgeVersion,
},
{
Platform: types.PlatformMinecraft,
Name: "minecraft",
Version: gameVersion,
},
},
Topology: &types.RuntimeTopology{
PrimaryNode: "forge",
Nodes: []types.RuntimeNode{
{
ID: "forge",
Role: types.RuntimeRoleModLoader,
Capabilities: []types.RuntimeCapability{types.CapabilityForgeMods},
},
},
},
}
}
package detector
import (
"archive/zip"
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
const forgeMavenArtifactBaseURL = "https://maven.minecraftforge.net/net/minecraftforge/forge"
type forgeArtifactKind string
const (
forgeArtifactServer forgeArtifactKind = "server"
forgeArtifactUniversal forgeArtifactKind = "universal"
forgeArtifactShim forgeArtifactKind = "shim"
)
var forgeArtifactHashLookup = lookupForgeArtifactHash
func ForgeInstallationRuntimes(workPath string) []*ExecutableEvidence {
forgeLib := filepath.Join(workPath, "libraries", "net", "minecraftforge", "forge")
entries, err := os.ReadDir(forgeLib)
if err != nil {
return nil
}
runtimes := make([]*ExecutableEvidence, 0, len(entries))
for _, entry := range entries {
if !entry.IsDir() {
continue
}
runtime, err := detectForgeInstallFromVersionDir(filepath.Join(forgeLib, entry.Name()))
if err != nil || runtime == nil {
continue
}
runtimes = append(runtimes, runtime)
}
return runtimes
}
func detectForgeInstallFromVersionDir(versionDir string) (*ExecutableEvidence, error) {
version := filepath.Base(versionDir)
match := forgeRuntimeVersionDirPattern.FindStringSubmatch(version)
if match == nil {
return nil, nil
}
gameVersion := types.RawVersion(match[1])
forgeVersion := types.RawVersion(match[2])
candidates := []struct {
kind forgeArtifactKind
path string
}{
{forgeArtifactServer, filepath.Join(versionDir, fmt.Sprintf("forge-%s-server.jar", version))},
{forgeArtifactUniversal, filepath.Join(versionDir, fmt.Sprintf("forge-%s-universal.jar", version))},
{forgeArtifactShim, filepath.Join(versionDir, fmt.Sprintf("forge-%s-shim.jar", version))},
}
for _, candidate := range candidates {
if !fileExists(candidate.path) {
continue
}
verified, err := forgeArtifactHashLookup(version, candidate.kind, candidate.path)
if err == nil && verified {
if candidate.kind == forgeArtifactShim {
continue
}
return buildForgeRuntimeInfo(candidate.path, gameVersion, forgeVersion), nil
}
}
for _, candidate := range candidates[:2] {
if !fileExists(candidate.path) {
continue
}
ok, err := verifyForgeArtifactByUnpack(candidate.path, candidate.kind, gameVersion, forgeVersion)
if err != nil || !ok {
continue
}
return buildForgeRuntimeInfo(candidate.path, gameVersion, forgeVersion), nil
}
return nil, nil
}
func verifyForgeArtifactByUnpack(
jarPath string,
kind forgeArtifactKind,
gameVersion types.RawVersion,
forgeVersion types.RawVersion,
) (bool, error) {
file, err := os.Open(jarPath)
if err != nil {
return false, err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return false, err
}
reader, err := zip.NewReader(file, stat.Size())
if err != nil {
return false, err
}
if kind == forgeArtifactUniversal {
manifestForge, manifestGame := parseForgeManifest(reader)
return manifestForge == forgeVersion && manifestGame == gameVersion, nil
}
if kind == forgeArtifactServer {
if compareForgeMajor(forgeVersion, 61) >= 0 {
shimPath := filepath.Join(filepath.Dir(jarPath), fmt.Sprintf("forge-%s-%s-shim.jar", gameVersion, forgeVersion))
if !fileExists(shimPath) {
return false, nil
}
ok, err := verifyForgeShimJar(shimPath)
if err != nil || !ok {
return false, err
}
}
return forgeHasSibling(jarPath, "unix_args.txt", "win_args.txt"), nil
}
return false, nil
}
func verifyForgeShimJar(jarPath string) (bool, error) {
file, err := os.Open(jarPath)
if err != nil {
return false, err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return false, err
}
reader, err := zip.NewReader(file, stat.Size())
if err != nil {
return false, err
}
hasProperties := false
hasList := false
for _, f := range reader.File {
switch f.Name {
case "bootstrap-shim.properties":
hasProperties = true
case "bootstrap-shim.list":
hasList = true
}
}
return hasProperties && hasList, nil
}
func lookupForgeArtifactHash(version string, artifact forgeArtifactKind, filePath string) (bool, error) {
sha1URL := fmt.Sprintf("%s/%s/%s.sha1", forgeMavenArtifactBaseURL, version, filepath.Base(filePath))
if ok, err := verifyForgeArtifactHash(filePath, sha1URL, cache.HashSHA1); ok || err != nil {
return ok, err
}
sha256URL := fmt.Sprintf("%s/%s/%s.sha256", forgeMavenArtifactBaseURL, version, filepath.Base(filePath))
return verifyForgeArtifactHash(filePath, sha256URL, cache.HashSHA256)
}
func verifyForgeArtifactHash(filePath string, checksumURL string, algo cache.HashAlgorithm) (bool, error) {
data, err := util.CachedGetBytes(checksumURL, util.BytesRequestOptions{Kind: cache.KindMetadata, MaxBytes: 256})
if err != nil {
return false, nil
}
expected := strings.TrimSpace(string(data))
if expected == "" {
return false, nil
}
actual, err := hashForgeArtifact(filePath, algo)
if err != nil {
return false, err
}
return strings.EqualFold(actual, expected), nil
}
func hashForgeArtifact(filePath string, algo cache.HashAlgorithm) (string, error) {
data, err := os.ReadFile(filePath)
if err != nil {
return "", err
}
switch algo {
case cache.HashSHA1:
sum := sha1.Sum(data)
return hex.EncodeToString(sum[:]), nil
case cache.HashSHA256:
sum := sha256.Sum256(data)
return hex.EncodeToString(sum[:]), nil
default:
return "", fmt.Errorf("unsupported forge artifact hash algorithm: %s", algo)
}
}
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
package detector
import (
"archive/zip"
"bufio"
"os"
"path/filepath"
"strings"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
)
type geyserStandaloneDetector struct{}
func (d *geyserStandaloneDetector) Name() string {
return "geyser standalone"
}
// Sources:
// - https://geysermc.org/wiki/geyser/setup/self/standalone
// - https://geysermc.org/wiki/geyser/setup/self/proxy-servers
// - https://geysermc.org/wiki/geyser/faq/
func (d *geyserStandaloneDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
manifest, ok, err := readArchiveEntry(zipReader, "META-INF/MANIFEST.MF")
if err != nil {
return nil, err
}
if !ok {
return nil, nil
}
signals := parseGeyserStandaloneManifest(manifest)
if !signals.valid() {
return nil, nil
}
hasStandaloneBootstrap, err := archiveContains(
zipReader,
"org/geysermc/geyser/platform/standalone/GeyserStandaloneBootstrap.class",
)
if err != nil {
return nil, err
}
if !hasStandaloneBootstrap {
return nil, nil
}
version := signals.version
if !hasConcreteVersion(version) {
version = parseGeyserStandaloneVersionFromPath(filePath)
}
return &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: types.VersionUnknown,
RuntimeIdentities: []types.PackageId{{
Platform: types.PlatformAny,
Name: syntax.ToProjectName("geyser"),
Version: version,
}},
Topology: &types.RuntimeTopology{
PrimaryNode: "geyser_standalone",
Nodes: []types.RuntimeNode{{
ID: "geyser_standalone",
Role: types.RuntimeRoleProxy,
Capabilities: []types.RuntimeCapability{
types.CapabilityProxying,
types.CapabilityProtocolBridge,
},
}},
},
BridgeHints: []string{"geyser_standalone"},
}, nil
}
type geyserStandaloneManifestSignals struct {
mainClass string
version types.RawVersion
}
func (s geyserStandaloneManifestSignals) valid() bool {
return s.mainClass == "org.geysermc.geyser.platform.standalone.GeyserStandaloneBootstrap"
}
func parseGeyserStandaloneManifest(data []byte) geyserStandaloneManifestSignals {
var signals geyserStandaloneManifestSignals
scanner := bufio.NewScanner(strings.NewReader(string(data)))
for scanner.Scan() {
line := scanner.Text()
switch {
case strings.HasPrefix(line, "Main-Class: "):
signals.mainClass = strings.TrimSpace(strings.TrimPrefix(line, "Main-Class: "))
case strings.HasPrefix(line, "Implementation-Version: "):
signals.version = types.RawVersion(strings.TrimSpace(strings.TrimPrefix(line, "Implementation-Version: ")))
}
}
return signals
}
func parseGeyserStandaloneVersionFromPath(filePath string) types.RawVersion {
base := strings.ToLower(filepath.Base(filePath))
if strings.Contains(base, "geyser") && strings.Contains(base, "standalone") {
return types.VersionUnknown
}
return types.VersionUnknown
}
func init() {
registerExecutableDetector(&geyserStandaloneDetector{})
}
package detector
import (
"archive/zip"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path"
"strings"
"github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
"gopkg.in/yaml.v3"
"github.com/mclucy/lucy/logger"
)
const mcdrConfigFileName = "config.yml"
// McdrDetector detects MCDR (MCDReforged) installations
type McdrDetector struct{}
func (d *McdrDetector) Name() string {
return "mcdr"
}
func (d *McdrDetector) Detect(dir string, env *types.EnvironmentInfo) {
configPath := path.Join(dir, mcdrConfigFileName)
if _, err := os.Stat(configPath); os.IsNotExist(err) {
return
}
// File exists, try to read it
configFile, err := os.Open(configPath)
if err != nil {
logger.Warn(err)
return
}
defer func(configFile io.ReadCloser) {
err := configFile.Close()
if err != nil {
logger.Warn(err)
}
}(configFile)
configData, err := io.ReadAll(configFile)
if err != nil {
logger.Warn(err)
return
}
config := &exttype.FileMcdrConfig{}
if err := yaml.Unmarshal(configData, config); err != nil {
logger.Warn(err)
return
}
bytes, err := exec.Command("mcdreforged", "--version").Output()
if err != nil {
logger.ReportWarn(
fmt.Errorf(
"cannot execute mcdr, it is in your $PATH?: %w",
err,
),
)
}
// `mcdreforged --version` output is like "MCDReforged 2.13.2"
version := types.RawVersion(strings.Split(string(bytes), " ")[1])
env.Mcdr = &types.McdrEnv{
Version: version,
Config: config,
}
}
func init() {
registerEnvironmentDetector(&McdrDetector{})
registerOtherPackageDetector(&McdrPluginDetector{})
}
type McdrPluginDetector struct{}
func (d *McdrPluginDetector) Name() string {
return "mcdr plugin"
}
func (d *McdrPluginDetector) Detect(
zipReader *zip.Reader,
fileHandle *os.File,
) (packages []types.Package, err error) {
var pkg types.Package
for _, f := range zipReader.File {
if f.Name == "mcdreforged.plugin.json" {
r, err := f.Open()
if err != nil {
return nil, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
pluginInfo := &exttype.FileMcdrPluginIdentifier{}
if err := json.Unmarshal(data, pluginInfo); err != nil {
return nil, err
}
pkg = translateMcdrPlugin(pluginInfo, fileHandle.Name())
}
}
packages = append(packages, pkg)
return packages, nil
}
package detector
import (
"github.com/mclucy/lucy/dependency"
"github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
)
// parseNpmVersionRange parses MCDR plugin dependency requirements.
//
// References:
// - https://docs.mcdreforged.com/en/latest/plugin_dev/metadata.html
// - https://docs.npmjs.com/about-semantic-versioning
//
// Note: call sites remain unchanged in detector; the parser implementation is
// centralized in the dependency package.
func parseNpmVersionRange(s string) types.VersionConstraintExpression {
return dependency.ParseRange(
s,
dependency.InferRangeDialect(types.PlatformMCDR),
types.Semver,
)
}
func translateMcdrPlugin(
pluginInfo *exttype.FileMcdrPluginIdentifier,
localPath string,
) types.Package {
pkg := types.Package{
Id: types.PackageId{
Platform: types.PlatformMCDR,
Name: syntax.ToProjectName(pluginInfo.Id),
Version: types.RawVersion(pluginInfo.Version),
},
Local: &types.PackageInstallation{
Path: localPath,
},
Dependencies: &types.PackageDependencies{},
Information: &types.ProjectInformation{},
}
pkg.Dependencies.Value = append(pkg.Dependencies.Value,
translateMcdrDependencies(pluginInfo.Dependencies)...,
)
pkg.Information.Authors = make([]types.Person, len(pluginInfo.Author))
for i, author := range pluginInfo.Author {
pkg.Information.Authors[i] = types.Person{Name: author}
}
pkg.Information.Title = pluginInfo.Name
pkg.Information.Brief = pluginInfo.Description.EnUs
pkg.Information.Urls = []types.Url{{
Name: "Link",
Type: types.UrlSource,
Url: pluginInfo.Link,
}}
return pkg
}
func translateMcdrDependencies(deps map[string]string) []types.Dependency {
translated := make([]types.Dependency, 0, len(deps))
for key, value := range deps {
translated = append(translated, types.Dependency{
Id: types.PackageId{
Platform: types.PlatformMCDR,
Name: syntax.ToProjectName(key),
},
Constraint: parseNpmVersionRange(value),
Mandatory: true,
})
}
return translated
}
package detector
import (
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/util"
)
type modLoaderArtifactKind string
const (
modLoaderArtifactServer modLoaderArtifactKind = "server"
modLoaderArtifactUniversal modLoaderArtifactKind = "universal"
modLoaderArtifactShim modLoaderArtifactKind = "shim"
)
type modLoaderInstallSpec struct {
platform types.Platform
name string
libraryRoot string
mavenBaseURL string
candidateNames func(versionDir, version string) []modLoaderCandidate
unpackVerify func(candidate modLoaderCandidate, gameVersion, loaderVersion types.RawVersion) (bool, error)
}
type modLoaderCandidate struct {
kind modLoaderArtifactKind
path string
}
func detectModLoaderInstallFromVersionDir(
versionDir string,
spec modLoaderInstallSpec,
hashLookup func(version string, artifact modLoaderArtifactKind, filePath string) (bool, error),
) (*ExecutableEvidence, error) {
version := filepath.Base(versionDir)
gameVersion, loaderVersion, ok := parseModLoaderVersionTuple(versionDir, spec.platform)
if !ok {
return nil, nil
}
candidates := spec.candidateNames(versionDir, version)
for _, candidate := range candidates {
if !fileExists(candidate.path) {
continue
}
verified, err := hashLookup(version, candidate.kind, candidate.path)
if err == nil && verified {
if candidate.kind == modLoaderArtifactShim {
continue
}
return buildModLoaderRuntimeInfo(spec.platform, spec.name, candidate.path, gameVersion, loaderVersion), nil
}
}
for _, candidate := range candidates {
if candidate.kind == modLoaderArtifactShim || !fileExists(candidate.path) {
continue
}
ok, err := spec.unpackVerify(candidate, gameVersion, loaderVersion)
if err != nil || !ok {
continue
}
return buildModLoaderRuntimeInfo(spec.platform, spec.name, candidate.path, gameVersion, loaderVersion), nil
}
return nil, nil
}
var neoforgeVersionDirPattern = regexp.MustCompile(`^(\d+)\.(\d+)(?:\.\d+)*$`)
func parseModLoaderVersionTuple(versionDir string, platform types.Platform) (gameVersion, loaderVersion types.RawVersion, ok bool) {
name := filepath.Base(versionDir)
switch platform {
case types.PlatformForge:
match := forgeRuntimeVersionDirPattern.FindStringSubmatch(name)
if match == nil {
return types.VersionUnknown, types.VersionUnknown, false
}
return types.RawVersion(match[1]), types.RawVersion(match[2]), true
case types.PlatformNeoforge:
match := neoforgeVersionDirPattern.FindStringSubmatch(name)
if match == nil {
return types.VersionUnknown, types.VersionUnknown, false
}
gameVersion := types.RawVersion("1." + match[1] + "." + match[2])
return gameVersion, types.RawVersion(name), true
default:
return types.VersionUnknown, types.VersionUnknown, false
}
}
func modLoaderInstallationRuntimes(workPath string, spec modLoaderInstallSpec, hashLookup func(version string, artifact modLoaderArtifactKind, filePath string) (bool, error)) []*ExecutableEvidence {
libraryRoot := filepath.Join(workPath, spec.libraryRoot)
entries, err := os.ReadDir(libraryRoot)
if err != nil {
return nil
}
runtimes := make([]*ExecutableEvidence, 0, len(entries))
for _, entry := range entries {
if !entry.IsDir() {
continue
}
runtime, err := detectModLoaderInstallFromVersionDir(filepath.Join(libraryRoot, entry.Name()), spec, hashLookup)
if err != nil || runtime == nil {
continue
}
runtimes = append(runtimes, runtime)
}
return runtimes
}
func lookupModLoaderArtifactHash(version string, candidate modLoaderCandidate, mavenBaseURL string) (bool, error) {
sha1URL := fmt.Sprintf("%s/%s/%s.sha1", mavenBaseURL, version, filepath.Base(candidate.path))
if ok, err := verifyArtifactHash(candidate.path, sha1URL, cache.HashSHA1); ok || err != nil {
return ok, err
}
sha256URL := fmt.Sprintf("%s/%s/%s.sha256", mavenBaseURL, version, filepath.Base(candidate.path))
return verifyArtifactHash(candidate.path, sha256URL, cache.HashSHA256)
}
func verifyArtifactHash(filePath string, checksumURL string, algo cache.HashAlgorithm) (bool, error) {
data, err := util.CachedGetBytes(checksumURL, util.BytesRequestOptions{Kind: cache.KindMetadata, MaxBytes: 256})
if err != nil {
return false, nil
}
expected := strings.TrimSpace(string(data))
if expected == "" {
return false, nil
}
actual, err := hashArtifactFile(filePath, algo)
if err != nil {
return false, err
}
return strings.EqualFold(actual, expected), nil
}
func hashArtifactFile(filePath string, algo cache.HashAlgorithm) (string, error) {
data, err := os.ReadFile(filePath)
if err != nil {
return "", err
}
switch algo {
case cache.HashSHA1:
sum := sha1.Sum(data)
return hex.EncodeToString(sum[:]), nil
case cache.HashSHA256:
sum := sha256.Sum256(data)
return hex.EncodeToString(sum[:]), nil
default:
return "", fmt.Errorf("unsupported artifact hash algorithm: %s", algo)
}
}
func buildModLoaderRuntimeInfo(platform types.Platform, name string, filePath string, gameVersion types.RawVersion, loaderVersion types.RawVersion) *ExecutableEvidence {
capability := types.CapabilityForgeMods
if platform == types.PlatformNeoforge {
capability = types.CapabilityNeoforgeMods
}
return &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: gameVersion,
RuntimeIdentities: []types.PackageId{
{Platform: platform, Name: types.ProjectName(name), Version: loaderVersion},
{Platform: types.PlatformMinecraft, Name: "minecraft", Version: gameVersion},
},
Topology: &types.RuntimeTopology{
PrimaryNode: types.RuntimeNodeID(name),
Nodes: []types.RuntimeNode{{
ID: types.RuntimeNodeID(name),
Role: types.RuntimeRoleModLoader,
Capabilities: []types.RuntimeCapability{capability},
}},
},
}
}
package detector
import (
"archive/zip"
"bufio"
"bytes"
"encoding/json"
"io"
"os"
"strings"
externaltype "github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
"github.com/pelletier/go-toml"
)
// neoforgeServerDetector detects NeoForge servers
type neoforgeServerDetector struct{}
func (d *neoforgeServerDetector) Name() string {
return "neoforge server"
}
func (d *neoforgeServerDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
neoforgeLoaderVersion := types.VersionUnknown
gameVersion := types.VersionUnknown
// Single pass through manifest for both game version and classpath
for _, f := range zipReader.File {
if f.Name == "META-INF/MANIFEST.MF" {
r, err := f.Open()
if err != nil {
continue
}
defer tools.CloseReader(r, logger.Warn)
var classPaths []string
s := bufio.NewScanner(r)
for s.Scan() {
line := s.Text()
// Parse game version from manifest
if line == "Specification-Title: Minecraft" {
// the n+2 line contains the version
if !s.Scan() {
continue
}
if !s.Scan() {
continue
}
line := s.Text()
if after, found := strings.CutPrefix(
line,
"Specification-Version: ",
); found {
gameVersion = types.RawVersion(after)
}
}
// Parse Class-Path for NeoForge classpath entry
if after, found := strings.CutPrefix(
line,
"Class-Path: ",
); found {
classPathsStr := after
for s.Scan() && !strings.Contains(s.Text(), ":") {
line := s.Text()
line = strings.TrimSpace(line)
classPathsStr += line
}
classPaths = strings.Split(classPathsStr, " ")
}
}
// Primary detection signal: NeoForge classpath entry
for _, path := range classPaths {
if after, found := strings.CutPrefix(
path,
"libraries/net/neoforged/neoforge/",
); found {
neoforgeLoaderVersion = types.RawVersion(
strings.Split(after, "/")[0],
)
break
}
}
break
}
}
// Return nil if primary NeoForge signal not found
if !hasConcreteVersion(neoforgeLoaderVersion) {
return nil, nil
}
// Build and return result (gameVersion may be VersionUnknown if not in manifest)
exec := &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: gameVersion,
RuntimeIdentities: []types.PackageId{
{
Platform: types.PlatformNeoforge,
Name: syntax.ToProjectName("neoforge"),
Version: neoforgeLoaderVersion,
},
{
Platform: types.PlatformMinecraft,
Name: syntax.ToProjectName("minecraft"),
Version: gameVersion,
},
},
Topology: &types.RuntimeTopology{
PrimaryNode: "neoforge",
Nodes: []types.RuntimeNode{
{
ID: "neoforge",
Role: types.RuntimeRoleModLoader,
Capabilities: []types.RuntimeCapability{types.CapabilityNeoforgeMods},
},
},
},
}
return exec, nil
}
// neoforgeModDetector detects NeoForge mods
type neoforgeModDetector struct{}
func (d *neoforgeModDetector) Name() string {
return "neoforge mod"
}
func (d *neoforgeModDetector) Detect(
zipReader *zip.Reader,
fileHandle *os.File,
) (packages []types.Package, err error) {
// Read jarjar metadata once; used for both embedded modId set and dep list.
jarjarMeta := readJarjarMeta(zipReader)
embeddedModIds := jarjarEmbeddedModIds(zipReader, jarjarMeta)
for _, f := range zipReader.File {
if f.Name == "META-INF/neoforge.mods.toml" {
r, err := f.Open()
if err != nil {
return nil, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
modIdentifier := &externaltype.FileModLoaderIdentifier{}
err = toml.Unmarshal(data, modIdentifier)
if err != nil {
return nil, err
}
for _, mod := range modIdentifier.Mods {
// Skip the neoforge mod itself
// It will be handled by the executable detector separately
if mod.ModID == "neoforge" {
continue
}
// Version
version := types.RawVersion(mod.Version)
if version == "${file.jarVersion}" {
version = getForgeModVersion(zipReader)
}
// Parse as internal id
p := types.Package{
Id: types.PackageId{
Platform: types.PlatformNeoforge,
Name: syntax.ToProjectName(mod.ModID),
Version: version,
},
Local: &types.PackageInstallation{
Path: fileHandle.Name(),
},
Dependencies: &types.PackageDependencies{},
Information: &types.ProjectInformation{},
}
// Parse dependencies
//
// This provides an authentic information (rather than a remote).
// The file is exactly what the loader checks for.
//
// Unexpected mod behavior is not our concern. Later we will
// add manual dependency/conflict management features.
deps := modIdentifier.Dependencies[mod.ModID]
for _, dep := range deps {
if dep.Type == "incompatible" {
continue
}
if strings.EqualFold(dep.Side, "CLIENT") {
continue
}
switch dep.ModID {
case "neoforge", "forge", "minecraft", "java":
continue
}
p.Dependencies.Value = append(
p.Dependencies.Value,
types.Dependency{
Id: types.PackageId{
Platform: types.PlatformNeoforge,
Name: syntax.ToProjectName(dep.ModID),
},
Constraint: parseModLoaderMavenVersionRange(dep.VersionRange),
Mandatory: dep.Type == "required" || dep.Mandatory,
Embedded: embeddedModIds[dep.ModID],
},
)
}
// Parse info
p.Information = &types.ProjectInformation{
Title: mod.DisplayName,
Brief: mod.Description,
Authors: []types.Person{{Name: mod.Authors}},
License: modIdentifier.License,
Urls: []types.Url{
{
Name: "URL",
Type: types.UrlHome,
Url: mod.DisplayURL,
},
{
Name: "Issue Tracker",
Type: types.UrlIssues,
Url: modIdentifier.IssueTrackerURL,
},
},
}
// Append JarInJar embedded library dependencies from
// META-INF/jarjar/metadata.json if present.
// Reference: https://docs.neoforged.net/toolchain/docs/dependencies/jarinjar/
embedded := jarjarEmbeddedDeps(jarjarMeta)
p.Dependencies.Value = append(p.Dependencies.Value, embedded...)
packages = append(packages, p)
}
}
}
return packages, nil
}
// readJarjarMeta parses META-INF/jarjar/metadata.json from a NeoForge mod JAR.
// Returns nil if the file is absent or cannot be parsed.
//
// Reference: https://docs.neoforged.net/toolchain/docs/dependencies/jarinjar/
func readJarjarMeta(zipReader *zip.Reader) *externaltype.FileNeoforgeJarjar {
for _, f := range zipReader.File {
if f.Name != "META-INF/jarjar/metadata.json" {
continue
}
r, err := f.Open()
if err != nil {
logger.Warn(err)
return nil
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
logger.Warn(err)
return nil
}
var meta externaltype.FileNeoforgeJarjar
if err := json.Unmarshal(data, &meta); err != nil {
logger.Warn(err)
return nil
}
return &meta
}
return nil
}
// jarjarEmbeddedModIds returns the set of NeoForge modIds that are physically
// bundled inside the JAR via JarInJar. It does this by opening each embedded
// nested JAR listed in the jarjar metadata and reading its neoforge.mods.toml.
//
// This is how the NeoForge mod loader itself resolves which modId a JarInJar
// entry satisfies: it reads the embedded JAR's mods.toml, not the artifact name.
func jarjarEmbeddedModIds(zipReader *zip.Reader, meta *externaltype.FileNeoforgeJarjar) map[string]bool {
if meta == nil {
return nil
}
// Index the outer ZIP entries by name for O(1) lookup.
byName := make(map[string]*zip.File, len(zipReader.File))
for _, f := range zipReader.File {
byName[f.Name] = f
}
modIds := make(map[string]bool)
for _, entry := range meta.Jars {
f, ok := byName[entry.Path]
if !ok {
continue
}
// Read the embedded JAR bytes into memory so we can open it as a zip.
rc, err := f.Open()
if err != nil {
logger.Warn(err)
continue
}
jarBytes, err := io.ReadAll(rc)
tools.CloseReader(rc, logger.Warn)
if err != nil {
logger.Warn(err)
continue
}
nestedZip, err := zip.NewReader(bytes.NewReader(jarBytes), int64(len(jarBytes)))
if err != nil {
logger.Warn(err)
continue
}
for _, nf := range nestedZip.File {
if nf.Name != "META-INF/neoforge.mods.toml" {
continue
}
nr, err := nf.Open()
if err != nil {
logger.Warn(err)
break
}
tomlData, err := io.ReadAll(nr)
tools.CloseReader(nr, logger.Warn)
if err != nil {
logger.Warn(err)
break
}
var inner externaltype.FileModLoaderIdentifier
if err := toml.Unmarshal(tomlData, &inner); err != nil {
logger.Warn(err)
break
}
for _, mod := range inner.Mods {
if mod.ModID != "" {
modIds[mod.ModID] = true
}
}
break
}
}
return modIds
}
// jarjarEmbeddedDeps converts jarjar metadata into Dependency entries with
// Embedded=true, using Maven group:artifact as the synthetic package name.
func jarjarEmbeddedDeps(meta *externaltype.FileNeoforgeJarjar) []types.Dependency {
if meta == nil {
return nil
}
deps := make([]types.Dependency, 0, len(meta.Jars))
for _, entry := range meta.Jars {
name := syntax.ToProjectName(entry.Identifier.Group + ":" + entry.Identifier.Artifact)
deps = append(deps, types.Dependency{
Id: types.PackageId{
Platform: types.PlatformNone,
Name: name,
},
Constraint: parseModLoaderMavenVersionRange(entry.Version.Range),
Mandatory: true,
Embedded: true,
})
}
return deps
}
func init() {
registerExecutableDetector(&neoforgeServerDetector{})
registerModDetector(&neoforgeModDetector{})
}
package detector
import (
"archive/zip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/mclucy/lucy/types"
)
// neoForgeMavenArtifactBaseURL is the Maven base URL for NeoForge artifacts.
// Source: https://maven.neoforged.net/releases/net/neoforged/neoforge/
const neoForgeMavenArtifactBaseURL = "https://maven.neoforged.net/releases/net/neoforged/neoforge"
var neoforgeArtifactHashLookup = func(version string, artifact modLoaderArtifactKind, filePath string) (bool, error) {
return lookupModLoaderArtifactHash(version, modLoaderCandidate{kind: artifact, path: filePath}, neoForgeMavenArtifactBaseURL)
}
// NeoForgeInstallationRuntimes scans libraries/net/neoforged/neoforge/ for installed
// NeoForge server artifacts and returns detected runtime infos.
//
// Detection order:
// 1. Maven .sha1 / .sha256 hash verification
// 2. Unpack-based content verification
//
// References:
// - https://maven.neoforged.net/releases/net/neoforged/neoforge/
// - https://docs.neoforged.net/user/docs/server/
// - https://github.com/neoforged/NeoForge/blob/main/CHANGELOG.md
func NeoForgeInstallationRuntimes(workPath string) []*ExecutableEvidence {
spec := modLoaderInstallSpec{
platform: types.PlatformNeoforge,
name: "neoforge",
libraryRoot: filepath.Join("libraries", "net", "neoforged", "neoforge"),
mavenBaseURL: neoForgeMavenArtifactBaseURL,
candidateNames: neoForgeCandidateNames,
unpackVerify: verifyNeoForgeArtifactByUnpack,
}
return modLoaderInstallationRuntimes(workPath, spec, neoforgeArtifactHashLookup)
}
func neoForgeCandidateNames(versionDir, version string) []modLoaderCandidate {
return []modLoaderCandidate{
{kind: modLoaderArtifactServer, path: filepath.Join(versionDir, fmt.Sprintf("neoforge-%s-server.jar", version))},
{kind: modLoaderArtifactUniversal, path: filepath.Join(versionDir, fmt.Sprintf("neoforge-%s-universal.jar", version))},
{kind: modLoaderArtifactShim, path: filepath.Join(versionDir, fmt.Sprintf("neoforge-%s-shim.jar", version))},
}
}
func verifyNeoForgeArtifactByUnpack(
candidate modLoaderCandidate,
gameVersion types.RawVersion,
loaderVersion types.RawVersion,
) (bool, error) {
file, err := os.Open(candidate.path)
if err != nil {
return false, err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return false, err
}
reader, err := zip.NewReader(file, stat.Size())
if err != nil {
return false, err
}
switch candidate.kind {
case modLoaderArtifactUniversal:
return verifyNeoForgeUniversalManifest(reader, loaderVersion)
case modLoaderArtifactServer:
return forgeHasSibling(candidate.path, "run.sh", "run.bat"), nil
default:
return false, nil
}
}
func verifyNeoForgeUniversalManifest(reader *zip.Reader, loaderVersion types.RawVersion) (bool, error) {
manifest, ok, err := readZipFile(reader, "META-INF/MANIFEST.MF")
if err != nil || !ok {
return false, err
}
if strings.Contains(manifest, "Specification-Title: neoforge") {
return true, nil
}
classpathEntry := fmt.Sprintf("libraries/net/neoforged/neoforge/%s/", loaderVersion)
return strings.Contains(manifest, classpathEntry), nil
}
func readZipFile(reader *zip.Reader, name string) (string, bool, error) {
for _, file := range reader.File {
if file.Name != name {
continue
}
r, err := file.Open()
if err != nil {
return "", false, err
}
defer r.Close()
data, err := io.ReadAll(r)
if err != nil {
return "", false, err
}
return string(data), true, nil
}
return "", false, nil
}
package detector
import "github.com/mclucy/lucy/types"
type paperFamilyResult uint8
const (
paperFamilyUnknown paperFamilyResult = iota
familyStrong
familyWeak
familyMiss
familyContradiction
)
type paperBrandResult uint8
const (
paperBrandUnknown paperBrandResult = iota
brandPaper
brandFork
brandUnknown
brandContradiction
)
type paperObservations struct {
hasPaperClasses bool
hasSpigotClasses bool
officialDistribution bool
detectedBrand string
gameVersion types.RawVersion
metaMainClass string
librariesListEntries []string
versionsListEntries []string
patchesListEntries []string
downloadContext string
versionJSONID types.RawVersion
patchProperties map[string]string
hasPaperMCPatch bool
buildInfo string
leavesclipVersion string
manifestMainClass string
manifestSpecificationTitle string
manifestSpecificationVendor string
manifestImplementationTitle string
manifestImplementationVendor string
manifestImplementationVer string
hasLeaperNamespace bool
hasLeavesclipNamespace bool
hasPaperclipNamespace bool
hasLegacyPaperclipNamespace bool
hasYouerNamespace bool
}
type paperJudgment struct {
bukkitConfirmed bool
observations paperObservations
familyResult paperFamilyResult
brandResult paperBrandResult
brandName string
contradictionState string
fastPathUsed bool
reasons []string
}
func newPaperJudgment() paperJudgment {
return paperJudgment{
familyResult: paperFamilyUnknown,
brandResult: paperBrandUnknown,
reasons: make([]string, 0, 8),
}
}
func (j *paperJudgment) addReason(reason string) {
if reason == "" {
return
}
j.reasons = append(j.reasons, reason)
}
package detector
import (
"archive/zip"
"encoding/json"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/mclucy/lucy/types"
)
const (
paperMetaMainClassPath = "META-INF/main-class"
paperLibrariesListPath = "META-INF/libraries.list"
paperVersionsListPath = "META-INF/versions.list"
paperPatchesListPath = "META-INF/patches.list"
paperDownloadContextPath = "META-INF/download-context"
paperVersionJSONPath = "version.json"
paperPatchPropertiesPath = "patch.properties"
paperPatchReaperToken = "paperMC.patch"
paperPatchFilePath = paperPatchReaperToken
paperBuildInfoPath = "META-INF/build-info"
paperLeavesclipVersionPath = "META-INF/leavesclip-version"
paperLeaperNamespacePrefix = "cn/dreeam/leaper/"
paperLeavesclipNamespace = "org/leavesmc/leavesclip/"
paperPaperclipNamespace = "io/papermc/paperclip/"
paperLegacyPaperclipPrefix = "paperclip/"
paperYouerNamespace = "com/mohistmc/launcher/youer/"
paperManifestYouerToken = "Youer"
paperMainClassYouerToken = "youer"
paperLibraryPaperToken = "io.papermc.paper:paper-api:"
paperLibraryFoliaToken = "dev.folia:folia-api:"
paperLibraryDivineToken = "org.bxteam.divinemc:divinemc-api:"
paperLibraryPurpurToken = "org.purpurmc.purpur:purpur-api:"
paperLibraryLeafToken = "cn.dreeam.leaf:leaf-api:"
paperLibraryLeavesToken = "org.leavesmc.leaves:leaves-api:"
)
type paperObservationEntry struct {
name string
read func() ([]byte, error)
}
type paperObservationSource interface {
Walk(func(paperObservationEntry) error) error
}
func extractPaperObservations(
filePath string,
zipReader *zip.Reader,
) (paperObservations, error) {
obs := paperObservations{
patchProperties: make(map[string]string),
}
source, err := newPaperObservationSource(filePath, zipReader)
if err != nil {
return paperObservations{}, err
}
err = source.Walk(func(entry paperObservationEntry) error {
name := entry.name
switch {
case strings.HasPrefix(name, bukkitPaperClassPrefix), strings.HasPrefix(name, bukkitLegacyPaperClassPrefix):
obs.hasPaperClasses = true
case strings.HasPrefix(name, bukkitSpigotClassPrefix):
obs.hasSpigotClasses = true
case strings.HasPrefix(name, paperLeaperNamespacePrefix):
obs.hasLeaperNamespace = true
case strings.HasPrefix(name, paperLeavesclipNamespace):
obs.hasLeavesclipNamespace = true
case strings.HasPrefix(name, paperPaperclipNamespace):
obs.hasPaperclipNamespace = true
case strings.HasPrefix(name, paperLegacyPaperclipPrefix):
obs.hasLegacyPaperclipNamespace = true
case strings.HasPrefix(name, paperYouerNamespace):
obs.hasYouerNamespace = true
}
switch name {
case bukkitManifestPath:
data, err := entry.read()
if err != nil {
return err
}
signals := parseBukkitManifest(data)
obs.manifestMainClass = signals.mainClass
obs.manifestSpecificationTitle = signals.specificationTitle
obs.manifestSpecificationVendor = signals.specificationVendor
obs.manifestImplementationTitle = signals.implementationTitle
obs.manifestImplementationVendor = signals.implementationVendor
obs.manifestImplementationVer = signals.implementationVer
case paperMetaMainClassPath:
data, err := entry.read()
if err != nil {
return err
}
obs.metaMainClass = strings.TrimSpace(string(data))
case paperLibrariesListPath:
data, err := entry.read()
if err != nil {
return err
}
obs.librariesListEntries = readObservationLines(data)
case paperVersionsListPath:
data, err := entry.read()
if err != nil {
return err
}
obs.versionsListEntries = readObservationLines(data)
case paperPatchesListPath:
data, err := entry.read()
if err != nil {
return err
}
obs.patchesListEntries = readObservationLines(data)
case paperDownloadContextPath:
data, err := entry.read()
if err != nil {
return err
}
obs.downloadContext = strings.TrimSpace(string(data))
case paperVersionJSONPath:
data, err := entry.read()
if err != nil {
return err
}
obs.versionJSONID = parsePaperVersionJSONID(data)
case paperPatchPropertiesPath:
data, err := entry.read()
if err != nil {
return err
}
obs.patchProperties = parsePaperPatchProperties(data)
obs.hasPaperMCPatch = obs.hasPaperMCPatch || strings.EqualFold(obs.patchProperties["patch"], paperPatchFilePath)
case paperPatchFilePath:
obs.hasPaperMCPatch = true
case paperBuildInfoPath:
data, err := entry.read()
if err != nil {
return err
}
obs.buildInfo = strings.TrimSpace(string(data))
case paperLeavesclipVersionPath:
data, err := entry.read()
if err != nil {
return err
}
obs.leavesclipVersion = strings.TrimSpace(string(data))
}
return nil
})
if err != nil {
return paperObservations{}, err
}
obs.gameVersion = inferPaperObservationGameVersion(obs)
return obs, nil
}
func newPaperObservationSource(
filePath string,
zipReader *zip.Reader,
) (paperObservationSource, error) {
if zipReader != nil {
return paperObservationZipSource{reader: zipReader}, nil
}
info, err := os.Stat(filePath)
if err != nil {
return nil, err
}
if !info.IsDir() {
return nil, fmt.Errorf("paper observation path is not a directory: %s", filePath)
}
return paperObservationDirSource{root: filePath}, nil
}
type paperObservationZipSource struct {
reader *zip.Reader
}
func (s paperObservationZipSource) Walk(fn func(paperObservationEntry) error) error {
for _, file := range s.reader.File {
if file.FileInfo().IsDir() {
continue
}
zipFile := file
if err := fn(paperObservationEntry{
name: filepath.ToSlash(zipFile.Name),
read: func() ([]byte, error) {
r, err := zipFile.Open()
if err != nil {
return nil, err
}
defer r.Close()
return io.ReadAll(r)
},
}); err != nil {
return err
}
}
return nil
}
type paperObservationDirSource struct {
root string
}
func (s paperObservationDirSource) Walk(fn func(paperObservationEntry) error) error {
return filepath.WalkDir(s.root, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
return nil
}
rel, err := filepath.Rel(s.root, path)
if err != nil {
return err
}
absolutePath := path
return fn(paperObservationEntry{
name: filepath.ToSlash(rel),
read: func() ([]byte, error) {
return os.ReadFile(absolutePath)
},
})
})
}
func readObservationLines(data []byte) []string {
trimmed := strings.TrimSpace(string(data))
if trimmed == "" {
return nil
}
return strings.Split(trimmed, "\n")
}
func parsePaperVersionJSONID(data []byte) types.RawVersion {
var payload struct {
ID string `json:"id"`
}
if err := json.Unmarshal(data, &payload); err != nil {
return types.VersionUnknown
}
if !isMinecraftReleaseVersion(payload.ID) {
return types.VersionUnknown
}
return types.RawVersion(payload.ID)
}
func parsePaperPatchProperties(data []byte) map[string]string {
properties := make(map[string]string)
for _, line := range readObservationLines(data) {
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
key, value, ok := strings.Cut(line, "=")
if !ok {
continue
}
properties[strings.TrimSpace(key)] = strings.TrimSpace(value)
}
return properties
}
func inferPaperObservationGameVersion(obs paperObservations) types.RawVersion {
if hasConcreteVersion(obs.versionJSONID) {
return obs.versionJSONID
}
if version := parseBukkitGameVersion(obs.manifestImplementationVer); hasConcreteVersion(version) {
return version
}
if version := parsePaperVersionList(obs.versionsListEntries); hasConcreteVersion(version) {
return version
}
if version := parsePaperPatchPropertiesVersion(obs.patchProperties); hasConcreteVersion(version) {
return version
}
return types.VersionUnknown
}
func parsePaperVersionList(entries []string) types.RawVersion {
for _, entry := range entries {
fields := strings.Split(entry, "\t")
if len(fields) < 2 {
continue
}
candidate := strings.TrimSpace(fields[1])
if isMinecraftReleaseVersion(candidate) {
return types.RawVersion(candidate)
}
}
return types.VersionUnknown
}
func parsePaperPatchPropertiesVersion(properties map[string]string) types.RawVersion {
version := strings.TrimSpace(properties["version"])
if !isMinecraftReleaseVersion(version) {
return types.VersionUnknown
}
return types.RawVersion(version)
}
package detector
// detectorRegistry manages registered detectors
type detectorRegistry struct {
executableDetectors []ExecutableDetector
jarPackageDetectors []PackageDetector
otherPackageDetectors map[string]PackageDetector
environmentDetectors []EnvironmentDetector
}
// Global registry instance
var registry = &detectorRegistry{
executableDetectors: make([]ExecutableDetector, 0),
jarPackageDetectors: make([]PackageDetector, 0),
otherPackageDetectors: make(map[string]PackageDetector),
environmentDetectors: make([]EnvironmentDetector, 0),
}
// registerExecutableDetector adds a new executable detector to the registry
func registerExecutableDetector(detector ExecutableDetector) {
registry.executableDetectors = append(
registry.executableDetectors,
detector,
)
}
// registerModDetector adds a new mod detector to the registry
func registerModDetector(detector PackageDetector) {
registry.jarPackageDetectors = append(
registry.jarPackageDetectors,
detector,
)
}
func registerOtherPackageDetector(detector PackageDetector) {
registry.otherPackageDetectors[detector.Name()] = detector
}
// registerEnvironmentDetector adds a new environment detector to the registry
func registerEnvironmentDetector(detector EnvironmentDetector) {
registry.environmentDetectors = append(
registry.environmentDetectors,
detector,
)
}
// getExecutableDetectors returns all registered executable detectors
func getExecutableDetectors() []ExecutableDetector {
return registry.executableDetectors
}
// getModDetectors returns all registered mod detectors
func getModDetectors() []PackageDetector {
return registry.jarPackageDetectors
}
func getOtherPackageDetectors() map[string]PackageDetector {
return registry.otherPackageDetectors
}
// getEnvironmentDetectors returns all registered environment detectors
func getEnvironmentDetectors() []EnvironmentDetector {
return registry.environmentDetectors
}
package detector
import (
"archive/zip"
"encoding/json"
"io"
"os"
"strings"
"github.com/mclucy/lucy/dependency"
externaltype "github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
const spongePluginMetadataPath = "META-INF/sponge_plugins.json"
type spongeDetector struct{}
func newSpongeDetector() *spongeDetector {
return &spongeDetector{}
}
func (d *spongeDetector) Name() string {
return "sponge plugin"
}
func (d *spongeDetector) Detect(
zipReader *zip.Reader,
fileHandle *os.File,
) ([]types.Package, error) {
for _, f := range zipReader.File {
if f.Name != spongePluginMetadataPath {
continue
}
r, err := f.Open()
if err != nil {
return nil, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
var metadata externaltype.FileSpongePluginsIdentifier
if err := json.Unmarshal(data, &metadata); err != nil {
return nil, err
}
if !validSpongeMetadata(&metadata) {
return []types.Package{}, nil
}
packages := make([]types.Package, 0, len(metadata.Plugins))
for _, plugin := range metadata.Plugins {
pkg, ok := translateSpongePlugin(&metadata, plugin, fileHandle.Name())
if !ok {
continue
}
packages = append(packages, pkg)
}
if len(packages) == 0 {
return []types.Package{}, nil
}
return packages, nil
}
return nil, nil
}
func validSpongeMetadata(metadata *externaltype.FileSpongePluginsIdentifier) bool {
if strings.TrimSpace(metadata.Loader.Name) == "" ||
strings.TrimSpace(metadata.Loader.Version) == "" ||
len(metadata.Plugins) == 0 {
return false
}
for _, plugin := range metadata.Plugins {
if hasConcreteSpongePluginIdentity(metadata, plugin) {
return true
}
}
return false
}
func hasConcreteSpongePluginIdentity(
metadata *externaltype.FileSpongePluginsIdentifier,
plugin externaltype.FileSpongePluginMetadata,
) bool {
if strings.TrimSpace(plugin.ID) == "" || strings.TrimSpace(plugin.Entrypoint) == "" {
return false
}
return strings.TrimSpace(resolveSpongePluginVersion(metadata, plugin)) != ""
}
func translateSpongePlugin(
metadata *externaltype.FileSpongePluginsIdentifier,
plugin externaltype.FileSpongePluginMetadata,
localPath string,
) (types.Package, bool) {
if !hasConcreteSpongePluginIdentity(metadata, plugin) {
return types.Package{}, false
}
version := resolveSpongePluginVersion(metadata, plugin)
pkg := types.Package{
Id: types.PackageId{
Platform: types.PlatformAny,
Name: syntax.ToProjectName(plugin.ID),
Version: types.RawVersion(version),
},
Local: &types.PackageInstallation{Path: localPath},
Dependencies: &types.PackageDependencies{
Value: translateSpongeDependencies(resolveSpongePluginDependencies(metadata, plugin)),
},
Information: &types.ProjectInformation{
Title: tools.Ternary(plugin.Name != "", plugin.Name, plugin.ID),
Brief: plugin.Description,
License: metadata.License,
Authors: translateSpongeContributors(resolveSpongePluginContributors(metadata, plugin)),
Urls: translateSpongeLinks(resolveSpongePluginLinks(metadata, plugin)),
},
}
return pkg, true
}
func resolveSpongePluginVersion(
metadata *externaltype.FileSpongePluginsIdentifier,
plugin externaltype.FileSpongePluginMetadata,
) string {
if version := strings.TrimSpace(plugin.Version); version != "" {
return version
}
return strings.TrimSpace(metadata.Global.Version)
}
func resolveSpongePluginLinks(
metadata *externaltype.FileSpongePluginsIdentifier,
plugin externaltype.FileSpongePluginMetadata,
) struct {
Homepage string
Source string
Issues string
} {
links := struct {
Homepage string
Source string
Issues string
}{
Homepage: metadata.Global.Links.Homepage,
Source: metadata.Global.Links.Source,
Issues: metadata.Global.Links.Issues,
}
if strings.TrimSpace(plugin.Links.Homepage) != "" {
links.Homepage = plugin.Links.Homepage
}
if strings.TrimSpace(plugin.Links.Source) != "" {
links.Source = plugin.Links.Source
}
if strings.TrimSpace(plugin.Links.Issues) != "" {
links.Issues = plugin.Links.Issues
}
return links
}
func resolveSpongePluginContributors(
metadata *externaltype.FileSpongePluginsIdentifier,
plugin externaltype.FileSpongePluginMetadata,
) []struct {
Name string
Description string
} {
contributors := metadata.Global.Contributors
if len(plugin.Contributors) > 0 {
contributors = plugin.Contributors
}
resolved := make([]struct {
Name string
Description string
}, 0, len(contributors))
for _, contributor := range contributors {
resolved = append(resolved, struct {
Name string
Description string
}{
Name: contributor.Name,
Description: contributor.Description,
})
}
return resolved
}
func resolveSpongePluginDependencies(
metadata *externaltype.FileSpongePluginsIdentifier,
plugin externaltype.FileSpongePluginMetadata,
) []struct {
ID string
Version string
LoadOrder string
Optional bool
} {
deps := metadata.Global.Dependencies
if len(plugin.Dependencies) > 0 {
deps = plugin.Dependencies
}
resolved := make([]struct {
ID string
Version string
LoadOrder string
Optional bool
}, 0, len(deps))
for _, dep := range deps {
resolved = append(resolved, struct {
ID string
Version string
LoadOrder string
Optional bool
}{
ID: dep.ID,
Version: dep.Version,
LoadOrder: dep.LoadOrder,
Optional: dep.Optional,
})
}
return resolved
}
func translateSpongeContributors(
contributors []struct {
Name string
Description string
},
) []types.Person {
people := make([]types.Person, 0, len(contributors))
for _, contributor := range contributors {
if strings.TrimSpace(contributor.Name) == "" {
continue
}
people = append(people, types.Person{
Name: contributor.Name,
Role: contributor.Description,
})
}
return people
}
func translateSpongeLinks(
links struct {
Homepage string
Source string
Issues string
},
) []types.Url {
urls := make([]types.Url, 0, 3)
if homepage := strings.TrimSpace(links.Homepage); homepage != "" {
urls = append(urls, types.Url{Name: "Homepage", Type: types.UrlHome, Url: homepage})
}
if source := strings.TrimSpace(links.Source); source != "" {
urls = append(urls, types.Url{Name: "Source", Type: types.UrlSource, Url: source})
}
if issues := strings.TrimSpace(links.Issues); issues != "" {
urls = append(urls, types.Url{Name: "Issues", Type: types.UrlIssues, Url: issues})
}
return urls
}
func translateSpongeDependencies(
deps []struct {
ID string
Version string
LoadOrder string
Optional bool
},
) []types.Dependency {
translated := make([]types.Dependency, 0, len(deps))
for _, dep := range deps {
id := strings.TrimSpace(dep.ID)
version := strings.TrimSpace(dep.Version)
if id == "" || version == "" || strings.EqualFold(id, "spongeapi") {
continue
}
translated = append(translated, types.Dependency{
Id: types.PackageId{
Platform: types.PlatformAny,
Name: syntax.ToProjectName(id),
},
Constraint: dependency.ParseRange(
version,
dependency.DialectMavenRange,
types.Semver,
),
Mandatory: !dep.Optional,
})
}
return translated
}
func init() {
registerModDetector(newSpongeDetector())
}
package detector
import (
"io"
"os"
"strings"
"github.com/mclucy/lucy/types"
)
// analyzeForgeArgFile parses Forge argument files to extract version information
// This is a helper function used by ForgeDetector
func analyzeForgeArgFile(file *os.File) (
forgeVersion types.RawVersion,
mcVersion types.RawVersion,
) {
data, _ := io.ReadAll(file)
s := string(data)
lines := strings.Split(s, "\n")
for _, line := range lines {
if strings.HasPrefix(line, "--fml.forgeVersion") {
split := strings.Split(line, " ")
if len(split) == 2 {
forgeVersion = types.RawVersion(split[1])
continue
}
forgeVersion = types.VersionUnknown
}
if strings.HasPrefix(line, "--fml.mcVersion") {
split := strings.Split(line, " ")
if len(split) == 2 {
mcVersion = types.RawVersion(split[1])
continue
}
mcVersion = types.VersionUnknown
}
}
return forgeVersion, mcVersion
}
package detector
import (
"archive/zip"
"encoding/json"
"io"
"os"
"github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
// VanillaDetector detects vanilla Minecraft servers
type VanillaDetector struct{}
func (d *VanillaDetector) Name() string {
return "vanilla server"
}
func (d *VanillaDetector) Detect(
filePath string,
zipReader *zip.Reader,
fileHandle *os.File,
) (*ExecutableEvidence, error) {
for _, f := range zipReader.File {
if f.Name == "version.json" {
r, err := f.Open()
if err != nil {
return nil, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
// This is to guard against misidentifying Forge installer jars as
// vanilla servers, which also contain version.json but with different
// structure
forgeInstallerGuard := &struct {
Comment []string `json:"_comment"`
MainClass string `json:"mainClass"`
}{}
err = json.Unmarshal(data, forgeInstallerGuard)
if err == nil {
return nil, nil
}
obj := exttype.FileMinecraftVersionSpec{}
err = json.Unmarshal(data, &obj)
if err != nil {
return nil, err
}
gameVersion := types.RawVersion(obj.Id)
exec := &ExecutableEvidence{
PrimaryEntrance: filePath,
GameVersion: gameVersion,
RuntimeIdentities: []types.PackageId{
{
Platform: types.PlatformMinecraft,
Name: "minecraft",
Version: gameVersion,
},
},
Topology: &types.RuntimeTopology{
PrimaryNode: "minecraft",
Nodes: []types.RuntimeNode{
{
ID: "minecraft",
Role: types.RuntimeRoleVanilla,
},
},
},
}
return exec, nil
}
}
return nil, nil
}
func init() {
registerExecutableDetector(&VanillaDetector{})
}
package detector
import (
"archive/zip"
"encoding/json"
"io"
"os"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
"gopkg.in/yaml.v3"
)
type velocityDetector struct{}
type bungeecordDetector struct{}
type velocityPluginDescriptor struct {
ID string `json:"id"`
Name string `json:"name"`
Version string `json:"version"`
Description string `json:"description"`
Authors []string `json:"authors"`
URL string `json:"url"`
}
type bungeecordPluginDescriptor struct {
Name string `yaml:"name"`
Version string `yaml:"version"`
Description string `yaml:"description"`
Author string `yaml:"author"`
Authors []string `yaml:"authors"`
Website string `yaml:"website"`
}
func newVelocityDetector() PackageDetector {
return &velocityDetector{}
}
func newBungeecordDetector() PackageDetector {
return &bungeecordDetector{}
}
func (d *velocityDetector) Name() string {
return "velocity plugin"
}
func (d *bungeecordDetector) Name() string {
return "bungeecord plugin"
}
func (d *velocityDetector) Detect(
zipReader *zip.Reader,
fileHandle *os.File,
) ([]types.Package, error) {
for _, f := range zipReader.File {
if f.Name != "velocity-plugin.json" {
continue
}
r, err := f.Open()
if err != nil {
return nil, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
descriptor := &velocityPluginDescriptor{}
if err := json.Unmarshal(data, descriptor); err != nil {
return nil, err
}
return []types.Package{translateVelocityPlugin(descriptor, fileHandle.Name())}, nil
}
return nil, nil
}
func (d *bungeecordDetector) Detect(
zipReader *zip.Reader,
fileHandle *os.File,
) ([]types.Package, error) {
for _, f := range zipReader.File {
if f.Name != "bungee.yml" {
continue
}
r, err := f.Open()
if err != nil {
return nil, err
}
defer tools.CloseReader(r, logger.Warn)
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
descriptor := &bungeecordPluginDescriptor{}
if err := yaml.Unmarshal(data, descriptor); err != nil {
return nil, err
}
return []types.Package{translateBungeecordPlugin(descriptor, fileHandle.Name())}, nil
}
return nil, nil
}
func translateVelocityPlugin(
descriptor *velocityPluginDescriptor,
localPath string,
) types.Package {
authors := make([]types.Person, 0, len(descriptor.Authors))
for _, author := range descriptor.Authors {
authors = append(authors, types.Person{Name: author})
}
urls := make([]types.Url, 0, 1)
if descriptor.URL != "" {
urls = append(urls, types.Url{
Name: "Homepage",
Type: types.UrlHome,
Url: descriptor.URL,
})
}
return types.Package{
Id: types.PackageId{
Platform: types.Platform("velocity"),
Name: syntax.ToProjectName(descriptor.ID),
Version: types.RawVersion(descriptor.Version),
},
Local: &types.PackageInstallation{
Path: localPath,
},
Information: &types.ProjectInformation{
Title: descriptor.Name,
Description: descriptor.Description,
Authors: authors,
Urls: urls,
},
}
}
func translateBungeecordPlugin(
descriptor *bungeecordPluginDescriptor,
localPath string,
) types.Package {
authors := make([]types.Person, 0, len(descriptor.Authors)+1)
if descriptor.Author != "" {
authors = append(authors, types.Person{Name: descriptor.Author})
}
for _, author := range descriptor.Authors {
authors = append(authors, types.Person{Name: author})
}
urls := make([]types.Url, 0, 1)
if descriptor.Website != "" {
urls = append(urls, types.Url{
Name: "Website",
Type: types.UrlHome,
Url: descriptor.Website,
})
}
return types.Package{
Id: types.PackageId{
Platform: types.Platform("bungeecord"),
Name: syntax.ToProjectName(descriptor.Name),
Version: types.RawVersion(descriptor.Version),
},
Local: &types.PackageInstallation{
Path: localPath,
},
Information: &types.ProjectInformation{
Title: descriptor.Name,
Description: descriptor.Description,
Authors: authors,
Urls: urls,
},
}
}
func init() {
registerModDetector(newVelocityDetector())
registerModDetector(newBungeecordDetector())
}
package topology
import "github.com/mclucy/lucy/types"
type ConnectionDefinition struct {
Source ConnectionSource
TargetNodeID types.RuntimeNodeID
Kind types.RuntimeEdgeVerb
}
func (d ConnectionDefinition) EdgeFrom(sourceNodeID types.RuntimeNodeID) types.RuntimeEdge {
return types.RuntimeEdge{
From: sourceNodeID,
To: d.TargetNodeID,
Verb: d.Kind,
}
}
package topology
import (
"sort"
"github.com/mclucy/lucy/types"
)
type ConnectionRegistry struct {
byNodeID map[types.RuntimeNodeID][]ConnectionDefinition
byCapability map[types.RuntimeCapability][]ConnectionDefinition
}
var DefaultConnectionRegistry = NewConnectionRegistry(nil)
func NewConnectionRegistry(definitions []ConnectionDefinition) ConnectionRegistry {
registry := ConnectionRegistry{
byNodeID: make(map[types.RuntimeNodeID][]ConnectionDefinition),
byCapability: make(map[types.RuntimeCapability][]ConnectionDefinition),
}
for _, definition := range definitions {
stored := cloneConnectionDefinition(definition)
switch stored.Source.Type {
case ConnectionSourceNode:
registry.byNodeID[stored.Source.NodeID] = append(
registry.byNodeID[stored.Source.NodeID],
stored,
)
case ConnectionSourceCapability:
registry.byCapability[stored.Source.Capability] = append(
registry.byCapability[stored.Source.Capability],
stored,
)
}
}
for nodeID := range registry.byNodeID {
sortConnectionDefinitions(registry.byNodeID[nodeID])
}
for capability := range registry.byCapability {
sortConnectionDefinitions(registry.byCapability[capability])
}
return registry
}
func (r ConnectionRegistry) LookupByNodeID(id types.RuntimeNodeID) []ConnectionDefinition {
definitions := r.byNodeID[id]
if len(definitions) == 0 {
return nil
}
return cloneConnectionDefinitions(definitions)
}
func (r ConnectionRegistry) LookupByCapability(capability types.RuntimeCapability) []ConnectionDefinition {
definitions := r.byCapability[capability]
if len(definitions) == 0 {
return nil
}
return cloneConnectionDefinitions(definitions)
}
func cloneConnectionDefinitions(definitions []ConnectionDefinition) []ConnectionDefinition {
cloned := make([]ConnectionDefinition, 0, len(definitions))
for _, definition := range definitions {
cloned = append(cloned, cloneConnectionDefinition(definition))
}
return cloned
}
func cloneConnectionDefinition(definition ConnectionDefinition) ConnectionDefinition {
return ConnectionDefinition{
Source: ConnectionSource{
Type: definition.Source.Type,
NodeID: definition.Source.NodeID,
Capability: definition.Source.Capability,
},
TargetNodeID: definition.TargetNodeID,
Kind: definition.Kind,
}
}
func sortConnectionDefinitions(definitions []ConnectionDefinition) {
sort.Slice(definitions, func(i, j int) bool {
if definitions[i].TargetNodeID != definitions[j].TargetNodeID {
return string(definitions[i].TargetNodeID) < string(definitions[j].TargetNodeID)
}
return string(definitions[i].Kind) < string(definitions[j].Kind)
})
}
package topology
import "github.com/mclucy/lucy/types"
type ConnectionSourceType string
const (
ConnectionSourceNode ConnectionSourceType = "node"
ConnectionSourceCapability ConnectionSourceType = "capability"
)
type ConnectionSource struct {
Type ConnectionSourceType
NodeID types.RuntimeNodeID
Capability types.RuntimeCapability
}
func SourceNode(id types.RuntimeNodeID) ConnectionSource {
return ConnectionSource{
Type: ConnectionSourceNode,
NodeID: id,
}
}
func SourceCapability(capability types.RuntimeCapability) ConnectionSource {
return ConnectionSource{
Type: ConnectionSourceCapability,
Capability: capability,
}
}
// Package probe provides functionality to gather and manage server information
// for a Minecraft server. It includes methods to retrieve server configuration,
// mod list, executable information, and other relevant details. The package
// utilizes memoization to avoid redundant calculations and resolve any data
// dependencies issues. Therefore, all probe functions are 100% concurrent-safe.
//
// The main exposed function is ServerInfo, which returns a comprehensive
// ServerInfo struct containing all the gathered information. To avoid side
// effects, the ServerInfo struct is returned as a copy, rather than reference.
package probe
import (
"os"
"path"
"path/filepath"
"sync"
"github.com/mclucy/lucy/exttype"
"github.com/mclucy/lucy/probe/internal/detector"
"gopkg.in/ini.v1"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
var (
serverInfoMu sync.RWMutex
serverInfoCache types.ServerInfo
serverInfoReady bool
resetProbeExecCache = func() {}
resetProbeFileLockCache = func() {}
)
// ServerInfo is the exposed function for external packages to get serverInfo.
// The value is cached after the first build, and read access is blocked while
// Rebuild refreshes the cache.
func ServerInfo() types.ServerInfo {
serverInfoMu.RLock()
if serverInfoReady {
cached := serverInfoCache
serverInfoMu.RUnlock()
return cached
}
serverInfoMu.RUnlock()
serverInfoMu.Lock()
defer serverInfoMu.Unlock()
if !serverInfoReady {
resetProbeMemoizedStateLocked()
serverInfoCache = buildServerInfo()
serverInfoReady = true
}
return serverInfoCache
}
// Rebuild forces ServerInfo to be regenerated and blocks all readers while
// rebuilding.
func Rebuild() {
serverInfoMu.Lock()
defer serverInfoMu.Unlock()
resetProbeMemoizedStateLocked()
serverInfoCache = buildServerInfo()
serverInfoReady = true
}
// InvalidateServerInfo marks the cached ServerInfo as stale so the next call
// to ServerInfo() will re-probe the server state. This is useful after
// installing packages (e.g., identity packages like Fabric) to refresh the
// topology cache without forcing an immediate rebuild.
func InvalidateServerInfo() {
serverInfoMu.Lock()
defer serverInfoMu.Unlock()
serverInfoReady = false
}
// ServerInfoAt probes an explicit working directory without replacing the
// current process-global ServerInfo cache. This is intended for init-style
// takeover discovery where the caller may need rich observed state for a target
// directory that is not the current process working directory.
func ServerInfoAt(workDir string) types.ServerInfo {
serverInfoMu.Lock()
defer serverInfoMu.Unlock()
return buildServerInfoAtLocked(workDir, false)
}
// RefreshServerInfo refreshes probed state for workDir. When workDir matches
// the current process working directory, this rebuilds the shared cache so
// future ServerInfo() calls observe the new state. Otherwise it performs an ad
// hoc reprobe and returns the refreshed observation without mutating the shared
// cache.
func RefreshServerInfo(workDir string) types.ServerInfo {
serverInfoMu.Lock()
defer serverInfoMu.Unlock()
return buildServerInfoAtLocked(workDir, true)
}
// DetectPackages analyzes a local artifact file and returns packages detected
// from its embedded metadata.
func DetectPackages(filePath string) []types.Package {
return detector.Packages(filePath)
}
func resetProbeMemoizedStateLocked() {
modPaths = tools.Memoize(buildModPaths)
getEnvironment = tools.Memoize(buildEnvironment)
workPath = tools.Memoize(buildWorkPath)
serverProperties = tools.Memoize(buildServerProperties)
savePath = tools.Memoize(buildSavePath)
installedPackages = tools.Memoize(buildInstalledPackages)
resetProbeExecCache()
resetProbeFileLockCache()
}
func buildServerInfoAtLocked(workDir string, persistWhenCurrent bool) types.ServerInfo {
target, err := filepath.Abs(workDir)
if err != nil {
return types.ServerInfo{}
}
originalWD, err := os.Getwd()
if err != nil {
return types.ServerInfo{}
}
originalTarget, err := filepath.Abs(originalWD)
if err != nil {
return types.ServerInfo{}
}
savedCache := serverInfoCache
savedReady := serverInfoReady
shouldRestoreCache := true
defer func() {
resetProbeMemoizedStateLocked()
if shouldRestoreCache {
serverInfoCache = savedCache
serverInfoReady = savedReady
}
}()
if err := os.Chdir(target); err != nil {
return types.ServerInfo{}
}
defer func() {
_ = os.Chdir(originalWD)
}()
resetProbeMemoizedStateLocked()
info := buildServerInfo()
if persistWhenCurrent && sameProbePath(target, originalTarget) {
serverInfoCache = info
serverInfoReady = true
shouldRestoreCache = false
}
return info
}
func sameProbePath(left, right string) bool {
leftEval, leftErr := filepath.EvalSymlinks(left)
if leftErr != nil {
leftEval = left
}
rightEval, rightErr := filepath.EvalSymlinks(right)
if rightErr != nil {
rightEval = right
}
return leftEval == rightEval
}
// buildServerInfo builds the server information by performing several checks
// and gathering data from various sources. It uses goroutines to perform these
// tasks concurrently and a sync.Mutex to ensure thread-safe updates to the
// serverInfo struct.
func buildServerInfo() types.ServerInfo {
var wg sync.WaitGroup
var mu sync.Mutex
var serverInfo types.ServerInfo
// Environment stage
wg.Add(1)
go func() {
defer wg.Done()
env := getEnvironment()
mu.Lock()
serverInfo.Environments = env
mu.Unlock()
}()
// Server Work Path
wg.Add(1)
go func() {
defer wg.Done()
workPath := workPath()
mu.Lock()
serverInfo.WorkPath = workPath
mu.Unlock()
}()
// Executable Stage
wg.Add(1)
go func() {
defer wg.Done()
executable := getExecutableInfo()
mu.Lock()
serverInfo.Runtime = executable
mu.Unlock()
}()
// Mod Path
wg.Add(1)
go func() {
defer wg.Done()
modPath := modPaths()
mu.Lock()
serverInfo.ModPath = modPath
mu.Unlock()
}()
// Mod List
wg.Add(1)
go func() {
defer wg.Done()
packages := installedPackages()
mu.Lock()
serverInfo.Packages = packages
mu.Unlock()
}()
// Save Path
wg.Add(1)
go func() {
defer wg.Done()
savePath := savePath()
mu.Lock()
serverInfo.SavePath = savePath
mu.Unlock()
}()
// TODO: Check for .lucy path
// However, the local installation method is not determined yet, so this is
// just a placeholder for now.
// Check if the server is running
wg.Add(1)
go func() {
defer wg.Done()
activity := checkServerFileLock()
mu.Lock()
serverInfo.Activity = activity
mu.Unlock()
}()
wg.Wait()
serverInfo.Packages = finalizeProbedRuntime(serverInfo.Runtime, serverInfo.Packages)
return serverInfo
}
// Some functions that gets a single piece of information. They are not exported,
// as ServerInfo() applies a memoization mechanism. Every time a serverInfo
// is needed, just call ServerInfo() without the concern of redundant calculation.
func buildModPaths() (paths []string) {
exec := getExecutableInfo()
if exec == nil {
return
}
return packageSearchPaths(exec, workPath())
}
var modPaths = tools.Memoize(buildModPaths)
func buildEnvironment() types.EnvironmentInfo {
return detector.Environment(".")
}
var getEnvironment = tools.Memoize(buildEnvironment)
func buildWorkPath() string {
env := getEnvironment()
if env.Mcdr != nil {
return env.Mcdr.Config.WorkingDirectory
}
return "."
}
var workPath = tools.Memoize(buildWorkPath)
func buildServerProperties() exttype.FileMinecraftServerProperties {
exec := getExecutableInfo()
propertiesPath := path.Join(workPath(), "server.properties")
file, err := ini.Load(propertiesPath)
if err != nil {
if exec != types.UnknownExecutable {
logger.Info("this server is missing a server.properties")
}
return nil
}
properties := make(map[string]string)
for _, section := range file.Sections() {
for _, key := range section.Keys() {
properties[key.Name()] = key.String()
}
}
return properties
}
var serverProperties = tools.Memoize(buildServerProperties)
func buildSavePath() string {
serverProperties := serverProperties()
if serverProperties == nil {
return ""
}
levelName := serverProperties["level-name"]
return path.Join(workPath(), levelName)
}
var savePath = tools.Memoize(buildSavePath)
func buildInstalledPackages() (mods []types.Package) {
idx := NewPackageIndex()
var mu sync.Mutex
paths := modPaths()
for _, modPath := range paths {
jarFiles, err := findJar(modPath)
if err != nil {
logger.Warn(err)
logger.Info("cannot read the mod directory")
continue
}
var wg sync.WaitGroup
for _, jarPath := range jarFiles {
wg.Add(1)
go func(path string) {
defer wg.Done()
analyzed := detector.Packages(path)
if analyzed == nil {
return
}
mu.Lock()
idx.Merge(analyzed)
mu.Unlock()
}(jarPath)
}
wg.Wait()
}
env := getEnvironment()
if env.Mcdr != nil {
for _, dir := range env.Mcdr.Config.PluginDirectories {
pluginFiles, err := findFileWithExt(dir, ".pyz", ".mcdr")
if err != nil {
logger.Warn(err)
logger.Info("cannot read the MCDR plugin directory")
continue
}
for _, pluginFile := range pluginFiles {
analyzed := detector.McdrPlugin(pluginFile)
if analyzed != nil {
idx.Merge(analyzed)
}
}
}
}
return idx.Packages()
}
var installedPackages = tools.Memoize(buildInstalledPackages)
package probe
import (
"fmt"
"os"
"path"
"slices"
"strings"
"sync"
"sync/atomic"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/probe/internal/detector"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
"charm.land/huh/v2"
)
const noteIgnorePath = "Some modding platforms are located from the libraries directory. " +
"You might want to look at the platform and version, rather than the path."
const multiThreadThreshold = 10
// getExecutableInfo uses the new detector-based architecture to find server executables
func buildExecutableInfo() *types.RuntimeInfo {
valid := make([]*detector.ExecutableEvidence, 0)
workPath := workPath()
for _, evidence := range detector.ForgeInstallationRuntimes(workPath) {
valid = append(valid, evidence)
}
for _, evidence := range detector.NeoForgeInstallationRuntimes(workPath) {
valid = append(valid, evidence)
}
// Layered search
// 1. pwd
// Proceed to step 2 no matter the result
jars, err := findJar(workPath)
if err != nil {
logger.Warn(fmt.Errorf("cannot read server directory: %w", err))
}
for _, jar := range jars {
candidates := detector.Executable(jar)
if candidates == nil || candidates.IsEmpty() || candidates.IsAmbiguous() {
continue
}
valid = append(valid, candidates.Single())
}
// 2. Forge/Fabric installation paths
// Will break after found
fabricLib := path.Join(
workPath, "libraries", "net", "fabricmc", "fabric-loader",
)
forgeLib := path.Join(
workPath, "libraries", "net", "minecraftforge", "forge",
)
var forgeJars, fabricJars []string
if stat, err := os.Stat(fabricLib); err == nil && stat.IsDir() {
fabricJars, err = findJar(fabricLib)
if err != nil {
logger.Warn(fmt.Errorf("cannot read fabric libraries: %w", err))
}
}
if len(valid) == 0 {
if stat, err := os.Stat(forgeLib); err == nil && stat.IsDir() {
forgeJars, err = findJar(forgeLib)
if err != nil {
logger.Warn(fmt.Errorf("cannot read forge libraries: %w", err))
}
}
}
jars = slices.Concat(forgeJars, fabricJars)
for _, jar := range jars {
candidates := detector.Executable(jar)
if candidates == nil || candidates.IsEmpty() || candidates.IsAmbiguous() {
continue
}
valid = append(valid, candidates.Single())
}
// 3. Everything under libraries
if len(valid) == 0 {
logger.Info("no valid jar found yet, trying to find under libraries")
jarPaths := findJarRecursive(path.Join(workPath, "libraries"))
if len(jarPaths) >= multiThreadThreshold {
mu := sync.Mutex{}
wg := sync.WaitGroup{}
for _, jarPath := range jarPaths {
wg.Add(1)
go func(jarPath string) {
candidates := detector.Executable(jarPath)
if candidates == nil || candidates.IsEmpty() || candidates.IsAmbiguous() {
wg.Done()
return
}
mu.Lock()
valid = append(valid, candidates.Single())
mu.Unlock()
wg.Done()
}(jarPath)
}
wg.Wait()
} else {
for _, jarPath := range jarPaths {
candidates := detector.Executable(jarPath)
if candidates == nil || candidates.IsEmpty() || candidates.IsAmbiguous() {
continue
}
valid = append(valid, candidates.Single())
}
}
}
// 4. pwd, recursively
// Prompt before do so due to the potential large number of files
// TODO: Implement
switch len(valid) {
case 0:
logger.Info("no server executable found")
return types.NoExecutable
case 1:
return materializeRuntimeInfo(valid[0])
default:
runtimes := make([]*types.RuntimeInfo, 0, len(valid))
for _, evidence := range valid {
runtimes = append(runtimes, materializeRuntimeInfo(evidence))
}
choice := promptSelectExecutable(
runtimes, []string{noteIgnorePath},
)
return materializeRuntimeInfo(valid[choice])
}
}
var getExecutableInfo = tools.Memoize(buildExecutableInfo)
func init() {
resetProbeExecCache = func() {
getExecutableInfo = tools.Memoize(buildExecutableInfo)
}
}
func promptSelectExecutable(
executables []*types.RuntimeInfo,
notes []string,
) int {
selection := 0
title := "Multiple possible executables detected, select one"
noteText := strings.TrimSpace(generateNotes(notes...))
if noteText != "" {
title = title + "\n" + noteText
}
options := make([]huh.Option[int], 0, len(executables))
for i, exec := range executables {
options = append(options, huh.NewOption(executableLabel(exec), i))
}
form := huh.NewForm(
huh.NewGroup(
huh.NewSelect[int]().
Title(title).
Options(options...).
Filtering(true).
Height(10).
Value(&selection),
),
)
if err := form.Run(); err != nil {
logger.ShowWarn(err)
}
return selection
}
func generateNotes(notes ...string) string {
var note strings.Builder
for _, n := range notes {
note.WriteString(tools.Cyan("*"))
note.WriteString(" ")
note.WriteString(n)
note.WriteString("\n")
}
return note.String()
}
func executableLabel(executable *types.RuntimeInfo) string {
return tools.Bold(executable.PrimaryEntrance) + " " + tools.Dim(executableAnnotation(executable))
}
func executableAnnotation(executable *types.RuntimeInfo) string {
gameVersion := executable.GameVersion.String()
derivedPlatform := executable.DerivedModLoader()
if derivedPlatform == types.PlatformMinecraft {
return fmt.Sprintf("(Minecraft %s, Vanilla)", gameVersion)
}
return fmt.Sprintf(
"(Minecraft %s, %s %s)",
gameVersion,
derivedPlatform.Title(),
executable.DerivedLoaderVersion(),
)
}
func findJar(dir ...string) (jarFiles []string, err error) {
jarFiles = []string{}
for _, d := range dir {
files, err := findFileWithExt(d, ".jar")
if err != nil {
return nil, err
}
jarFiles = append(jarFiles, files...)
}
return jarFiles, nil
}
func findFileWithExt(dir string, ext ...string) (files []string, err error) {
files = []string{}
entries, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
for _, entry := range entries {
if entry.IsDir() {
continue
}
if tools.Exists(ext, path.Ext(entry.Name())) {
files = append(files, path.Join(dir, entry.Name()))
}
}
return files, nil
}
const fileCountThreshold = 50000
func findJarRecursive(dir string) (jarFiles []string) {
jarFiles = []string{}
entries, _ := os.ReadDir(dir)
var wg sync.WaitGroup
var fileCount int32
var mu sync.Mutex
// TODO: Use semaphore to limit the number of goroutines
for _, entry := range entries {
if atomic.LoadInt32(&fileCount) >= fileCountThreshold {
logger.Info("file count threshold reached, stopping search")
break
}
if entry.IsDir() {
wg.Add(1)
go func(subDir string) {
defer wg.Done()
subJarFiles := findJarRecursive(subDir)
mu.Lock()
jarFiles = append(jarFiles, subJarFiles...)
mu.Unlock()
}(path.Join(dir, entry.Name()))
} else {
atomic.AddInt32(&fileCount, 1)
if path.Ext(entry.Name()) == ".jar" {
mu.Lock()
jarFiles = append(jarFiles, path.Join(dir, entry.Name()))
mu.Unlock()
}
}
}
wg.Wait()
return
}
//go:build unix || darwin || linux
/*
Copyright 2024 4rcadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package probe
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"path"
"strconv"
"strings"
"syscall"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
func buildServerFileLockStatus() *types.ServerActivity {
inactive := &types.ServerActivity{Active: false, Pid: 0}
if savePath() == "" {
return inactive
}
lockPath := path.Join(
savePath(),
"session.lock",
)
// Try lsof before using the file lock check. As the file lock check is
// tested to be unstable on linux (Ubuntu 20.04, Linux 5.15.0-48-generic).
pid, _ := lsof(lockPath)
if pid != 0 {
return &types.ServerActivity{Active: true, Pid: pid}
}
file, err := os.OpenFile(lockPath, os.O_RDWR|os.O_APPEND, 0o666)
defer tools.CloseReader(file, logger.Warn)
if err != nil && errors.Is(err, os.ErrNotExist) {
return inactive
} else if err != nil {
return nil
}
logger.Debug("checking lock on: " + file.Name())
err = syscall.Flock(int(file.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
if err != nil && errors.Is(err, syscall.EWOULDBLOCK) {
logger.Debug("found a lock on the file: " + err.Error())
fl := syscall.Flock_t{
Type: syscall.F_WRLCK,
}
err = syscall.FcntlFlock(file.Fd(), syscall.F_GETLK, &fl)
logger.Warn(
fmt.Errorf("activity detected but cannot get pid: %w", err),
)
if err != nil {
return &types.ServerActivity{Active: true, Pid: 0}
}
return &types.ServerActivity{Active: true, Pid: int(fl.Pid)}
} else if err != nil {
return nil
}
logger.Debug("no lock found on the file: " + file.Name())
err = syscall.Flock(int(file.Fd()), syscall.LOCK_UN)
if err != nil {
logger.Warn(err)
}
return inactive
}
var checkServerFileLock = tools.Memoize(buildServerFileLockStatus)
func init() {
resetProbeFileLockCache = func() {
checkServerFileLock = tools.Memoize(buildServerFileLockStatus)
}
}
func lsof(filePath string) (pid int, err error) {
cmd := exec.Command("lsof", filePath)
var out bytes.Buffer
cmd.Stdout = &out
err = cmd.Run()
if err != nil {
return 0, err
}
logger.Debug("got output from lsof:\n" + out.String())
lines := strings.Split(out.String(), "\n")
outputBegin := 0
for i, line := range lines {
if strings.Contains(line, "COMMAND") {
outputBegin = i + 1
break
}
}
for _, line := range lines[outputBegin:] {
if line == "" {
continue
}
fields := strings.Fields(line)
if len(fields) < 2 {
continue
}
if fields[0] == "java" {
return strconv.Atoi(fields[1])
}
}
return 0, nil
}
package probe
import (
"path"
"github.com/mclucy/lucy/types"
)
func finalizeProbedRuntime(
runtime *types.RuntimeInfo,
packages []types.Package,
) []types.Package {
EnrichTopologyFromPackages(runtime, packages)
ensureRuntimeTopology(runtime)
return packagesWithRuntimeIdentities(packages, runtime)
}
func ensureRuntimeTopology(runtime *types.RuntimeInfo) {
if runtime == nil || runtime.Topology != nil {
return
}
runtime.Topology = &types.RuntimeTopology{}
}
func packagesWithRuntimeIdentities(
packages []types.Package,
runtime *types.RuntimeInfo,
) []types.Package {
if runtime == nil || !runtime.IsValid() {
return packages
}
idx := NewPackageIndex()
idx.Merge(packages)
for _, rid := range runtime.RuntimeIdentities {
if rid.Platform == types.PlatformAny {
continue
}
idx.Add(types.Package{Id: rid})
}
return idx.Packages()
}
func packageSearchPaths(runtime *types.RuntimeInfo, workingDirectory string) []string {
if runtime == nil {
return nil
}
return packageSearchPathsForTopology(runtime.Topology, workingDirectory)
}
func packageSearchPathsForTopology(
topology *types.RuntimeTopology,
workingDirectory string,
) (paths []string) {
if topology == nil || !topology.Resolved() {
return nil
}
if topology.HasCapability(types.CapabilityFabricMods) ||
topology.HasCapability(types.CapabilityForgeMods) ||
topology.HasCapability(types.CapabilityNeoforgeMods) {
paths = append(paths, path.Join(workingDirectory, "mods"))
}
if topology.HasCapability(types.CapabilityBukkitPlugins) {
paths = append(paths, path.Join(workingDirectory, "plugins"))
}
return paths
}
package probe
import (
"sort"
"github.com/mclucy/lucy/types"
)
// PackageIndex is a map-backed package indexing utility that provides
// deterministic, sorted access to a collection of packages. It deduplicates
// packages by their full identifier (PackageId.StringFull()) and guarantees
// that all exported methods return results in a stable, deterministic order.
//
// PackageIndex does NOT expose raw map iteration order to any caller.
type PackageIndex struct {
pkgs map[string]types.Package
}
// NewPackageIndex creates a new, empty PackageIndex ready for use.
func NewPackageIndex() *PackageIndex {
return &PackageIndex{
pkgs: make(map[string]types.Package),
}
}
// Add inserts a package into the index with the following dedupe policy:
//
// - First-write wins: if a package with the same full ID already exists, the
// new entry is ignored.
// - EXCEPTION: if the existing entry has an empty Local.Path (i.e., it was
// discovered without a local installation path) AND the new package has a
// non-empty Local.Path, the new package replaces the existing one. This
// allows local-path enrichment to take precedence over remote-only entries.
//
// The dedupe key is pkg.Id.StringFull(), which encodes platform/name@version.
func (idx *PackageIndex) Add(pkg types.Package) {
key := pkg.Id.StringFull()
existing, exists := idx.pkgs[key]
if exists {
// First-write wins, UNLESS the existing entry lacks a local path
// and the incoming package provides one — in that case, the new
// entry replaces the old so that local-path information is preserved.
existingPath := ""
if existing.Local != nil {
existingPath = existing.Local.Path
}
newPath := ""
if pkg.Local != nil {
newPath = pkg.Local.Path
}
if existingPath != "" || newPath == "" {
return
}
}
idx.pkgs[key] = pkg
}
// Merge bulk-adds a slice of packages into the index. Each package is subject
// to the same dedupe policy as Add.
func (idx *PackageIndex) Merge(pkgs []types.Package) {
for _, pkg := range pkgs {
idx.Add(pkg)
}
}
// Packages returns a deterministic sorted projection of all indexed packages.
// The sort order is ascending by:
// 1. Platform (string)
// 2. Name (string)
// 3. Version (string)
//
// This method never exposes map iteration order; results are always sorted.
func (idx *PackageIndex) Packages() []types.Package {
result := make([]types.Package, 0, len(idx.pkgs))
for _, pkg := range idx.pkgs {
result = append(result, pkg)
}
sort.Slice(result, func(i, j int) bool {
pi, pj := result[i].Id, result[j].Id
if pi.Platform != pj.Platform {
return pi.Platform.String() < pj.Platform.String()
}
if pi.Name != pj.Name {
return pi.Name.String() < pj.Name.String()
}
return pi.Version.String() < pj.Version.String()
})
return result
}
// LookupByID performs an exact lookup by the full package identifier
// (PackageId.StringFull()). Returns the package and true if found, or a zero
// Package and false otherwise.
func (idx *PackageIndex) LookupByID(id types.PackageId) (types.Package, bool) {
pkg, ok := idx.pkgs[id.StringFull()]
return pkg, ok
}
// LookupByPlatformName returns all packages matching the given platform and
// name, sorted by Version (string ascending). If no packages match, returns
// nil.
//
// This method never exposes map iteration order; results are always sorted.
func (idx *PackageIndex) LookupByPlatformName(platform types.Platform, name string) []types.Package {
var matches []types.Package
for _, pkg := range idx.pkgs {
if pkg.Id.Platform == platform && pkg.Id.Name.String() == name {
matches = append(matches, pkg)
}
}
if len(matches) == 0 {
return nil
}
sort.Slice(matches, func(i, j int) bool {
return matches[i].Id.Version.String() < matches[j].Id.Version.String()
})
return matches
}
package probe
import (
"strings"
"github.com/mclucy/lucy/probe/internal/detector"
"github.com/mclucy/lucy/types"
)
func materializeRuntimeInfo(evidence *detector.ExecutableEvidence) *types.RuntimeInfo {
if evidence == nil {
return nil
}
return &types.RuntimeInfo{
PrimaryEntrance: evidence.PrimaryEntrance,
GameVersion: evidence.GameVersion,
BootCommand: nil,
Topology: materializeRuntimeTopology(evidence),
RuntimeIdentities: append([]types.PackageId(nil), evidence.RuntimeIdentities...),
BridgeHints: append([]string(nil), evidence.BridgeHints...),
}
}
func materializeRuntimeTopology(
evidence *detector.ExecutableEvidence,
) *types.RuntimeTopology {
if evidence == nil {
return nil
}
if evidence.Topology != nil {
return cloneRuntimeTopology(evidence.Topology)
}
if evidence.TopologySeed != nil {
return &types.RuntimeTopology{
PrimaryNode: evidence.TopologySeed.PrimaryNode,
Nodes: append([]types.RuntimeNode(nil), evidence.TopologySeed.Nodes...),
Edges: append([]types.RuntimeEdge(nil), evidence.TopologySeed.Edges...),
}
}
for _, identity := range evidence.RuntimeIdentities {
nodeID, ok := RuntimeIdentityNode(identity)
if !ok {
continue
}
entry, ok := FindEntry(nodeID)
if !ok {
continue
}
return BuildTopologyFromEntry(entry)
}
return nil
}
func RuntimeIdentityNode(identity types.PackageId) (types.RuntimeNodeID, bool) {
switch strings.ToLower(strings.TrimSpace(string(identity.Name))) {
case "fabric", "fabric-loader":
return RuntimeNodeFabric, true
case "forge":
return RuntimeNodeForge, true
case "neoforge":
return RuntimeNodeNeoforge, true
case "mcdreforged", "mcdr":
return RuntimeNodeMCDR, true
case "minecraft", "mc":
return RuntimeNodeMinecraft, true
default:
return "", false
}
}
func cloneRuntimeTopology(topology *types.RuntimeTopology) *types.RuntimeTopology {
if topology == nil {
return nil
}
return &types.RuntimeTopology{
PrimaryNode: topology.PrimaryNode,
Nodes: append([]types.RuntimeNode(nil), topology.Nodes...),
Edges: append([]types.RuntimeEdge(nil), topology.Edges...),
}
}
package probe
import (
"sort"
"strings"
internaltopology "github.com/mclucy/lucy/probe/internal/topology"
"github.com/mclucy/lucy/types"
)
// =============================================================================
// EVIDENCE PRECEDENCE POLICY
//
// This is the single authoritative definition of how conflicting or coexisting
// detection evidence is resolved. All conflict-resolution logic must trace back
// to this block. Never add ad-hoc precedence rules elsewhere.
//
// ECOSYSTEM FAMILIES (mutually exclusive for a single JAR)
//
// Tier 1 – Authoritative proxy descriptors:
// velocity → velocity-plugin.json (Velocity-specific)
// bungeecord → bungee.yml (BungeeCord-specific)
//
// Tier 1 – Authoritative server/plugin descriptors:
// bukkit → plugin.yml (Paper-family generic)
// paper → paper-plugin.yml (Paper-modern specific)
// leaves → leaves-plugin.json (Leaves-specific)
// folia → plugin.yml + folia-supported:true
//
// Tier 1 – Authoritative sponge descriptor:
// sponge → META-INF/sponge_plugins.json
//
// Tier 2 – Generic (no ecosystem proof without Tier 1):
// plugin.yml alone → bukkit family only; never implies proxy membership
//
// CONFLICT RULE
// If Tier-1 signals from two DIFFERENT incompatible ecosystem families are
// detected in the same JAR (e.g., velocity-plugin.json AND bungee.yml, or
// bungee.yml AND plugin.yml), the result for that JAR is unresolved/empty.
// We never guess which ecosystem wins. Unresolved is always safer than wrong.
//
// INTRA-FAMILY NOTE
// Within the Paper-family detector, descriptor precedence is already handled
// by leaves-plugin.json > paper-plugin.yml > plugin.yml (early return wins).
// That is NOT a conflict – it is expected descriptor layering within one JAR.
//
// IMPLEMENTATION
// The conflict check is applied in detector_aggregator.go::Packages(), which
// is the single point where all jar-detector results are merged. If the merged
// platform set spans two incompatible ecosystem families, Packages() returns
// nil, causing the caller to treat the JAR as having no recognized packages.
// =============================================================================
func EnrichTopologyFromPackages(
exec *types.RuntimeInfo,
packages []types.Package,
) {
if exec == nil {
return
}
evidence := detectedRuntimeEvidence(packages)
evidence = append(evidence, detectedRuntimeEvidenceFromHints(exec.BridgeHints)...)
if exec.Topology == nil {
// No topology yet — attempt to build one from package evidence.
if len(evidence) == 0 {
exec.Topology = &types.RuntimeTopology{}
return
}
if inferred := inferHostTopologyFromAttachedBridgePackages(packages); inferred != nil {
exec.Topology = inferred
} else {
// Build from the first evidence node, merge the rest.
firstEntry, ok := FindEntry(evidence[0])
if !ok {
exec.Topology = &types.RuntimeTopology{}
return
}
exec.Topology = BuildTopologyFromEntry(firstEntry)
if exec.Topology == nil {
exec.Topology = &types.RuntimeTopology{}
return
}
}
for _, nodeID := range evidence {
entry, ok := FindEntry(nodeID)
if !ok {
continue
}
annotation := BuildTopologyFromEntry(entry)
if annotation == nil {
continue
}
mergeTopology(exec.Topology, annotation)
}
applyDeclarativeConnections(exec.Topology, internaltopology.DefaultConnectionRegistry)
NormalizeTopology(exec.Topology)
FoldTopologyRisk(exec.Topology)
return
}
// Topology exists (resolved or not) — enrich with package evidence.
// This is additive annotation: bridge/adaptor evidence like Connector augments
// the existing host runtime topology without replacing the current primary
// runtime identity (for example, NeoForge remains the primary node).
for _, nodeID := range evidence {
entry, ok := FindEntry(nodeID)
if !ok {
continue
}
annotation := BuildTopologyFromEntry(entry)
if annotation == nil {
continue
}
mergeTopology(exec.Topology, annotation)
}
applyDeclarativeConnections(exec.Topology, internaltopology.DefaultConnectionRegistry)
NormalizeTopology(exec.Topology)
FoldTopologyRisk(exec.Topology)
}
func applyDeclarativeConnections(
t *types.RuntimeTopology,
registry internaltopology.ConnectionRegistry,
) {
if t == nil {
return
}
seenNodes := make(map[types.RuntimeNodeID]struct{}, len(t.Nodes))
queue := make([]types.RuntimeNode, 0, len(t.Nodes))
for _, node := range t.Nodes {
if _, seen := seenNodes[node.ID]; seen {
continue
}
seenNodes[node.ID] = struct{}{}
queue = append(queue, node)
}
seenEdges := make(map[types.RuntimeEdge]struct{}, len(t.Edges))
for _, edge := range t.Edges {
seenEdges[edge] = struct{}{}
}
for len(queue) > 0 {
node := queue[0]
queue = queue[1:]
definitions := registry.LookupByNodeID(node.ID)
for _, capability := range node.Capabilities {
definitions = append(definitions, registry.LookupByCapability(capability)...)
}
for _, definition := range definitions {
edge := definition.EdgeFrom(node.ID)
if _, seen := seenEdges[edge]; seen {
continue
}
if _, exists := seenNodes[definition.TargetNodeID]; !exists {
entry, ok := FindEntry(definition.TargetNodeID)
if !ok {
continue
}
targetNode := types.RuntimeNode{
ID: entry.NodeID,
Role: entry.Role,
Capabilities: append(
[]types.RuntimeCapability(nil),
entry.Capabilities...,
),
RiskLevel: entry.RiskLevel,
}
t.Nodes = append(t.Nodes, targetNode)
seenNodes[targetNode.ID] = struct{}{}
queue = append(queue, targetNode)
}
t.Edges = append(t.Edges, edge)
seenEdges[edge] = struct{}{}
}
}
}
func detectedRuntimeEvidence(packages []types.Package) []types.RuntimeNodeID {
names := make(map[string]struct{}, len(packages))
for _, pkg := range packages {
normalized := strings.ToLower(strings.TrimSpace(pkg.Id.Name.String()))
if normalized == "" {
continue
}
names[normalized] = struct{}{}
}
detected := make([]types.RuntimeNodeID, 0, 6)
if hasAnyName(names, "sinytra-connector") {
detected = append(detected, RuntimeNodeConnector)
}
if hasAnyName(names, "kilt") {
detected = append(detected, RuntimeNodeKilt)
}
if hasAnyName(names, "velocity") {
detected = append(detected, RuntimeNodeVelocity)
}
if hasAnyName(names, "bungeecord") {
detected = append(detected, RuntimeNodeBungeecord)
}
if hasAnyName(names, "waterfall") {
detected = append(detected, RuntimeNodeWaterfall)
}
if hasAnyName(names, "geyser", "geyser-spigot", "geyser-fabric") {
detected = append(detected, RuntimeNodeGeyser)
}
if hasAnyName(names, "arclight") {
detected = append(detected, RuntimeNodeArclight)
}
return detected
}
func inferHostTopologyFromAttachedBridgePackages(
packages []types.Package,
) *types.RuntimeTopology {
for _, pkg := range packages {
name := strings.ToLower(strings.TrimSpace(pkg.Id.Name.String()))
if name != "kilt" {
continue
}
if pkg.Id.Platform != types.PlatformFabric {
continue
}
entry, ok := FindEntry(RuntimeNodeFabric)
if !ok {
return nil
}
return BuildTopologyFromEntry(entry)
}
return nil
}
func detectedRuntimeEvidenceFromHints(hints []string) []types.RuntimeNodeID {
if len(hints) == 0 {
return nil
}
detected := make([]types.RuntimeNodeID, 0, len(hints))
for _, hint := range hints {
switch types.RuntimeNodeID(hint) {
case RuntimeNodeConnector,
RuntimeNodeKilt,
RuntimeNodeVelocity,
RuntimeNodeBungeecord,
RuntimeNodeGeyser,
RuntimeNodeGeyserStandalone:
detected = append(detected, types.RuntimeNodeID(hint))
}
}
return detected
}
func hasAnyName(names map[string]struct{}, candidates ...string) bool {
for _, candidate := range candidates {
if _, ok := names[candidate]; ok {
return true
}
}
return false
}
// NormalizeTopology deduplicates nodes (by ID, last-write wins) and edges
// (by From+To+Kind triple), then sorts both slices for deterministic output.
// Safe to call on nil or unresolved topologies.
func NormalizeTopology(t *types.RuntimeTopology) {
if t == nil {
return
}
seenNodes := make(map[types.RuntimeNodeID]int, len(t.Nodes))
deduped := make([]types.RuntimeNode, 0, len(t.Nodes))
for _, node := range t.Nodes {
if idx, exists := seenNodes[node.ID]; exists {
deduped[idx] = node
} else {
seenNodes[node.ID] = len(deduped)
deduped = append(deduped, node)
}
}
t.Nodes = deduped
type edgeKey struct {
From types.RuntimeNodeID
To types.RuntimeNodeID
Kind types.RuntimeEdgeVerb
}
seenEdges := make(map[edgeKey]struct{}, len(t.Edges))
dedupedEdges := make([]types.RuntimeEdge, 0, len(t.Edges))
for _, edge := range t.Edges {
key := edgeKey{From: edge.From, To: edge.To, Kind: edge.Verb}
if _, exists := seenEdges[key]; exists {
continue
}
seenEdges[key] = struct{}{}
dedupedEdges = append(dedupedEdges, edge)
}
t.Edges = dedupedEdges
sortTopology(t)
}
// FoldTopologyRisk propagates the maximum node risk level across all connected
// components by repeatedly folding each edge's endpoints to their maximum risk.
// Safe to call on nil or unresolved topologies.
func FoldTopologyRisk(t *types.RuntimeTopology) {
if t == nil {
return
}
nodeIndex := make(map[types.RuntimeNodeID]int, len(t.Nodes))
for i, node := range t.Nodes {
nodeIndex[node.ID] = i
}
changed := true
for changed {
changed = false
for _, edge := range t.Edges {
from, okFrom := nodeIndex[edge.From]
to, okTo := nodeIndex[edge.To]
if !okFrom || !okTo {
continue
}
maxRisk := max(t.Nodes[from].RiskLevel, t.Nodes[to].RiskLevel)
if t.Nodes[from].RiskLevel != maxRisk {
t.Nodes[from].RiskLevel = maxRisk
changed = true
}
if t.Nodes[to].RiskLevel != maxRisk {
t.Nodes[to].RiskLevel = maxRisk
changed = true
}
}
}
}
func mergeTopology(dst *types.RuntimeTopology, src *types.RuntimeTopology) {
if dst == nil || src == nil {
return
}
seenNodes := make(
map[types.RuntimeNodeID]struct{},
len(dst.Nodes)+len(src.Nodes),
)
for _, node := range dst.Nodes {
seenNodes[node.ID] = struct{}{}
}
for _, node := range src.Nodes {
if _, exists := seenNodes[node.ID]; exists {
continue
}
dst.Nodes = append(dst.Nodes, node)
seenNodes[node.ID] = struct{}{}
}
seenEdges := make(
map[types.RuntimeEdge]struct{},
len(dst.Edges)+len(src.Edges),
)
for _, edge := range dst.Edges {
seenEdges[edge] = struct{}{}
}
for _, edge := range src.Edges {
if _, exists := seenEdges[edge]; exists {
continue
}
dst.Edges = append(dst.Edges, edge)
seenEdges[edge] = struct{}{}
}
sortTopology(dst)
}
func sortTopology(t *types.RuntimeTopology) {
if t == nil {
return
}
sort.Slice(
t.Nodes, func(i, j int) bool {
return string(t.Nodes[i].ID) < string(t.Nodes[j].ID)
},
)
sort.Slice(
t.Edges, func(i, j int) bool {
if t.Edges[i].From != t.Edges[j].From {
return string(t.Edges[i].From) < string(t.Edges[j].From)
}
if t.Edges[i].To != t.Edges[j].To {
return string(t.Edges[i].To) < string(t.Edges[j].To)
}
return string(t.Edges[i].Verb) < string(t.Edges[j].Verb)
},
)
}
// Package probe provides functionality to gather and manage server information
// for a Minecraft server. It includes methods to retrieve server configuration,
// mod list, executable information, and other relevant details. The package
// utilizes memoization to avoid redundant calculations and resolve any data
// dependencies issues. Therefore, all probe functions are 100% concurrent-safe.
//
// The main exposed function is ServerInfo, which returns a comprehensive
// ServerInfo struct containing all the gathered information. To avoid side
// effects, the ServerInfo struct is returned as a copy, rather than reference.
package probe
import (
"fmt"
"github.com/mclucy/lucy/types"
)
// PURE POLICY LAYER: These evaluators are deterministic and side-effect free.
// They take topology values as input and return compatibility verdicts.
// No file I/O, no network calls, no logging, no panic.
//
// EvaluateCompatibility evaluates whether a server runtime (described by topology)
// can support the requested ecosystem. Verdict encodes direct support, indirect/hosted
// support, incompatibility, or unresolved topology. Indirect support is reported as
// CompatDegraded, while runtime risk remains a node-level topology concern. Never
// returns nil - always returns a deterministic result.
func EvaluateCompatibility(topology *types.RuntimeTopology, requiredCapability types.RuntimeCapability) types.CompatResult {
if topology == nil || !topology.Resolved() {
return types.CompatResult{
Verdict: types.CompatUnresolved,
Reason: "topology_unresolved",
Detail: "Server runtime topology has not been probed or could not be determined.",
}
}
// Collect nodes reachable only via EdgeHosts (indirect/hosted paths).
hostedTargets := make(map[types.RuntimeNodeID]struct{}, len(topology.Edges))
for _, edge := range topology.Edges {
if edge.Verb != types.EdgeHosts {
continue
}
targetNode, ok := topology.FindNode(edge.To)
if !ok || !targetNode.HasCapability(requiredCapability) {
continue
}
hostedTargets[edge.To] = struct{}{}
}
// Direct capability match (not via hosted path).
for _, node := range topology.Nodes {
if _, isHostedTarget := hostedTargets[node.ID]; isHostedTarget {
continue
}
if node.HasCapability(requiredCapability) {
return types.CompatResult{
Verdict: types.CompatCompatible,
Reason: "direct_capability_match",
Detail: fmt.Sprintf("Runtime has direct support for %s.", requiredCapability),
}
}
}
// Indirect/hosted capability match — always degraded regardless of node risk.
if len(hostedTargets) > 0 {
return types.CompatResult{
Verdict: types.CompatDegraded,
Reason: "indirect_capability_match",
Detail: fmt.Sprintf("Support for %s is available through a hosted or indirect runtime path.", requiredCapability),
}
}
return types.CompatResult{
Verdict: types.CompatIncompatible,
Reason: "no_capability_match",
Detail: fmt.Sprintf("Runtime does not support %s.", requiredCapability),
}
}
// CapabilityForPlatform maps a package's Platform identity to the RuntimeCapability
// it requires in the host server's topology. Returns empty string if no mapping exists.
func CapabilityForPlatform(p types.Platform) types.RuntimeCapability {
switch p {
case types.PlatformFabric:
return types.CapabilityFabricMods
case types.PlatformForge:
return types.CapabilityForgeMods
case types.PlatformNeoforge:
return types.CapabilityNeoforgeMods
case types.Platform("bukkit"), types.Platform("paper"), types.Platform("spigot"), types.Platform("folia"), types.Platform("leaves"):
return types.CapabilityBukkitPlugins
case types.Platform("velocity"):
return types.CapabilityVelocityPlugins
case types.Platform("bungeecord"), types.Platform("bungee"), types.Platform("waterfall"):
return types.CapabilityBungeecordPlugins
case types.PlatformMCDR:
return types.CapabilityMCDRPlugins
case types.Platform("sponge"):
return types.CapabilitySpongePlugins
default:
return ""
}
}
package probe
import (
internaltopology "github.com/mclucy/lucy/probe/internal/topology"
"github.com/mclucy/lucy/types"
)
type RegistryEntry struct {
NodeID types.RuntimeNodeID
Role types.RuntimeRole
Capabilities []types.RuntimeCapability
RiskLevel types.RuntimeRiskLevel
PolicyEdges []RegistryEdge
}
type RegistryEdge struct {
TargetNodeID types.RuntimeNodeID
Kind types.RuntimeEdgeVerb
}
type RuntimeRegistry struct {
byID map[types.RuntimeNodeID]RegistryEntry
}
var DefaultRegistry = NewRuntimeRegistry(defaultRegistryEntries)
func NewRuntimeRegistry(entries []RegistryEntry) RuntimeRegistry {
registry := RuntimeRegistry{
byID: make(map[types.RuntimeNodeID]RegistryEntry, len(entries)),
}
for _, entry := range entries {
stored := RegistryEntry{
NodeID: entry.NodeID,
Role: entry.Role,
Capabilities: append([]types.RuntimeCapability(nil), entry.Capabilities...),
RiskLevel: entry.RiskLevel,
PolicyEdges: append([]RegistryEdge(nil), entry.PolicyEdges...),
}
registry.byID[stored.NodeID] = stored
}
return registry
}
func (r RuntimeRegistry) FindEntry(id types.RuntimeNodeID) (RegistryEntry, bool) {
entry, ok := r.byID[id]
if !ok {
return RegistryEntry{}, false
}
return cloneEntry(entry), true
}
func FindEntry(id types.RuntimeNodeID) (RegistryEntry, bool) {
return DefaultRegistry.FindEntry(id)
}
// BuildTopologyFromEntry constructs a RuntimeTopology with a single primary node
// from a registry entry, plus any policy edges listed.
func BuildTopologyFromEntry(entry RegistryEntry) *types.RuntimeTopology {
if entry.NodeID == types.RuntimeNodeUnknown {
return &types.RuntimeTopology{}
}
nodes := []types.RuntimeNode{{
ID: entry.NodeID,
Role: entry.Role,
Capabilities: append([]types.RuntimeCapability(nil), entry.Capabilities...),
RiskLevel: entry.RiskLevel,
}}
edges := make([]types.RuntimeEdge, 0, len(entry.PolicyEdges))
seenNode := map[types.RuntimeNodeID]struct{}{entry.NodeID: {}}
for _, policyEdge := range entry.PolicyEdges {
edges = append(edges, types.RuntimeEdge{
From: entry.NodeID,
To: policyEdge.TargetNodeID,
Verb: policyEdge.Kind,
})
if _, alreadyAdded := seenNode[policyEdge.TargetNodeID]; alreadyAdded {
continue
}
target, ok := FindEntry(policyEdge.TargetNodeID)
if !ok {
continue
}
nodes = append(nodes, types.RuntimeNode{
ID: target.NodeID,
Role: target.Role,
Capabilities: append([]types.RuntimeCapability(nil), target.Capabilities...),
RiskLevel: target.RiskLevel,
})
seenNode[policyEdge.TargetNodeID] = struct{}{}
}
topology := &types.RuntimeTopology{
PrimaryNode: entry.NodeID,
Nodes: nodes,
Edges: edges,
}
applyDeclarativeConnections(topology, internaltopology.DefaultConnectionRegistry)
NormalizeTopology(topology)
FoldTopologyRisk(topology)
return topology
}
func cloneEntry(entry RegistryEntry) RegistryEntry {
return RegistryEntry{
NodeID: entry.NodeID,
Role: entry.Role,
Capabilities: append([]types.RuntimeCapability(nil), entry.Capabilities...),
RiskLevel: entry.RiskLevel,
PolicyEdges: append([]RegistryEdge(nil), entry.PolicyEdges...),
}
}
package probe
import (
"strings"
"github.com/mclucy/lucy/types"
)
// probe_topology_registry_data.go contains procedural topology lookup helpers,
// not the declarative topology source-of-truth.
var normalizedRuntimeIDByName = map[string]types.RuntimeNodeID{
"minecraft": RuntimeNodeMinecraft,
"vanilla": RuntimeNodeMinecraft,
"fabric": RuntimeNodeFabric,
"fabric server": RuntimeNodeFabric,
"forge": RuntimeNodeForge,
"forge server": RuntimeNodeForge,
"neoforge": RuntimeNodeNeoforge,
"neoforge server": RuntimeNodeNeoforge,
"mcdr": RuntimeNodeMCDR,
"mcdr plugin": RuntimeNodeMCDR,
"paper": RuntimeNodePaper,
"spigot": RuntimeNodeSpigot,
"paper-fork": RuntimeNodePaperFork,
"craftbukkit": RuntimeNodeCraftBukkit,
"bukkit": RuntimeNodeBukkit,
"folia": RuntimeNodeFolia,
"leaves": RuntimeNodeLeaves,
"velocity": RuntimeNodeVelocity,
"bungeecord": RuntimeNodeBungeecord,
"bungee": RuntimeNodeBungeecord,
"waterfall": RuntimeNodeWaterfall,
"sponge": RuntimeNodeSponge,
"arclight": RuntimeNodeArclight,
"kilt": RuntimeNodeKilt,
"geyser": RuntimeNodeGeyser,
"geyser standalone": RuntimeNodeGeyserStandalone,
}
func NormalizeRuntimeID(name string) types.RuntimeNodeID {
normalized := strings.TrimSpace(strings.ToLower(name))
if normalized == "" {
return types.RuntimeNodeUnknown
}
id, ok := normalizedRuntimeIDByName[normalized]
if !ok {
return types.RuntimeNodeUnknown
}
return id
}
package slugmap
import (
"encoding/json"
"os"
"path/filepath"
"sync"
"github.com/mclucy/lucy/types"
)
// Entry is one resolved mapping.
type Entry struct {
Source types.Source `json:"source"`
LocalId string `json:"local_id"`
FileHash string `json:"file_hash"`
CanonicalSlug string `json:"canonical_slug"`
// ResolvedBy is always "hash" — only hash-verified slugs are persisted.
ResolvedBy string `json:"resolved_by"`
}
type store struct {
mu sync.RWMutex
path string
entries map[string]Entry // key: source+"/"+localId or source+"/"+localId+"/"+fileHash
}
var (
defaultStore *store
once sync.Once
)
func Default() *store {
once.Do(
func() {
dir, err := os.UserConfigDir()
if err != nil {
dir = os.TempDir()
}
p := filepath.Join(dir, "lucy", "slugmap.json")
defaultStore = &store{path: p, entries: make(map[string]Entry)}
_ = defaultStore.load()
},
)
return defaultStore
}
func preciseKey(src types.Source, localId, fileHash string) string {
return string(src) + "/" + localId + "/" + fileHash
}
func looseKey(src types.Source, localId string) string {
return string(src) + "/" + localId
}
func (s *store) Set(src types.Source, localId, fileHash, canonicalSlug, resolvedBy string) {
s.mu.Lock()
defer s.mu.Unlock()
s.entries[preciseKey(src, localId, fileHash)] = Entry{
Source: src,
LocalId: localId,
FileHash: fileHash,
CanonicalSlug: canonicalSlug,
ResolvedBy: resolvedBy,
}
s.entries[looseKey(src, localId)] = Entry{
Source: src,
LocalId: localId,
FileHash: "",
CanonicalSlug: canonicalSlug,
ResolvedBy: resolvedBy,
}
_ = s.flush()
}
func (s *store) Get(src types.Source, localId, fileHash string) (slug string, ok bool) {
s.mu.RLock()
defer s.mu.RUnlock()
e, ok := s.entries[preciseKey(src, localId, fileHash)]
if !ok {
return "", false
}
return e.CanonicalSlug, true
}
func (s *store) GetLoose(src types.Source, localId string) (slug string, ok bool) {
s.mu.RLock()
defer s.mu.RUnlock()
e, ok := s.entries[looseKey(src, localId)]
if !ok {
return "", false
}
return e.CanonicalSlug, true
}
func (s *store) All() []Entry {
s.mu.RLock()
defer s.mu.RUnlock()
out := make([]Entry, 0, len(s.entries)/2)
for _, e := range s.entries {
if e.FileHash != "" {
out = append(out, e)
}
}
return out
}
func (s *store) Clear() {
s.mu.Lock()
defer s.mu.Unlock()
s.entries = make(map[string]Entry)
_ = s.flush()
}
func (s *store) load() error {
data, err := os.ReadFile(s.path)
if err != nil {
return err
}
return json.Unmarshal(data, &s.entries)
}
func (s *store) flush() error {
if err := os.MkdirAll(filepath.Dir(s.path), 0o700); err != nil {
return err
}
data, err := json.MarshalIndent(s.entries, "", " ")
if err != nil {
return err
}
return os.WriteFile(s.path, data, 0o600)
}
package state
type ArtifactRelationKind string
const RelationEmbedded ArtifactRelationKind = "embedded"
type ArtifactRelation struct {
ParentID string
ChildID string
Kind ArtifactRelationKind
}
type ManagedArtifact struct {
ID string
Version string
InstallPath string
Class ArtifactClass
Side string
Optional bool
Embedded bool
EmbeddedIn string
Provenance []string
Source string
Hash string
HashAlgorithm string
}
type BundleArtifact struct {
Name string
Type string
InstallPath string
Hash string
}
type ArtifactSet struct {
Packages []ManagedArtifact
Bundles []BundleArtifact
Relations []ArtifactRelation
}
func LockToArtifactSet(lock *Lock) ArtifactSet {
if lock == nil {
return ArtifactSet{}
}
scope := NewManagedScope(nil, nil)
artifacts := ArtifactSet{
Packages: make([]ManagedArtifact, 0, len(lock.Packages)),
Bundles: make([]BundleArtifact, 0, len(lock.Bundles)),
Relations: make([]ArtifactRelation, 0),
}
for _, pkg := range lock.Packages {
class := ClassifyPath(scope, pkg.InstallPath)
if pkg.Embedded {
class = ClassEmbedded
}
artifacts.Packages = append(artifacts.Packages, ManagedArtifact{
ID: pkg.ID,
Version: pkg.Version,
InstallPath: pkg.InstallPath,
Class: class,
Side: pkg.Side,
Optional: pkg.Optional,
Embedded: pkg.Embedded,
EmbeddedIn: pkg.EmbeddedIn,
Provenance: append([]string(nil), pkg.Provenance...),
Source: pkg.Source,
Hash: pkg.Hash,
HashAlgorithm: pkg.HashAlgorithm,
})
if pkg.Embedded && pkg.EmbeddedIn != "" {
artifacts.Relations = append(artifacts.Relations, ArtifactRelation{
ParentID: pkg.EmbeddedIn,
ChildID: pkg.ID,
Kind: RelationEmbedded,
})
}
}
for _, bundle := range lock.Bundles {
artifacts.Bundles = append(artifacts.Bundles, BundleArtifact{
Name: bundle.Name,
Type: bundle.Type,
InstallPath: bundle.InstallPath,
Hash: bundle.Hash,
})
}
return artifacts
}
func ManagedPackages(as ArtifactSet) []ManagedArtifact {
packages := make([]ManagedArtifact, 0, len(as.Packages))
for _, pkg := range as.Packages {
if pkg.Embedded || pkg.Class == ClassUnmanaged {
continue
}
packages = append(packages, pkg)
}
return packages
}
func EmbeddedPackages(as ArtifactSet) []ManagedArtifact {
packages := make([]ManagedArtifact, 0, len(as.Packages))
for _, pkg := range as.Packages {
if !pkg.Embedded {
continue
}
packages = append(packages, pkg)
}
return packages
}
package state
import (
"bytes"
"strconv"
)
// ParseConfig parses TOML config bytes into a validated Config.
func ParseConfig(data []byte) (*Config, error) {
var cfg Config
if err := cfg.Unmarshal(data); err != nil {
return nil, malformedStateError(ConfigFile, "document", err)
}
if err := ValidateConfig(cfg); err != nil {
return nil, err
}
return &cfg, nil
}
// SerializeConfig serializes a validated Config deterministically.
func SerializeConfig(c *Config) ([]byte, error) {
if c == nil {
return nil, NewStateError(ConfigFile, ErrMalformed, "document", "config is nil")
}
if err := ValidateConfig(*c); err != nil {
return nil, err
}
return marshalConfigDeterministic(c), nil
}
// ParseManifest parses manifest bytes into a validated Manifest.
func ParseManifest(data []byte) (*Manifest, error) {
var manifest Manifest
if err := manifest.Unmarshal(data); err != nil {
return nil, malformedStateError(ManifestFile, "document", err)
}
if err := ValidateManifest(manifest); err != nil {
return nil, err
}
return &manifest, nil
}
// SerializeManifest serializes a validated Manifest deterministically.
func SerializeManifest(m *Manifest) ([]byte, error) {
if m == nil {
return nil, NewStateError(ManifestFile, ErrMalformed, "document", "manifest is nil")
}
if err := ValidateManifest(*m); err != nil {
return nil, err
}
data, err := m.Marshal()
if err != nil {
return nil, malformedStateError(ManifestFile, "document", err)
}
return data, nil
}
// ParseLock parses JSON lock bytes into a validated Lock.
func ParseLock(data []byte) (*Lock, error) {
var lock Lock
if err := lock.Unmarshal(data); err != nil {
return nil, malformedStateError(LockFile, "document", err)
}
if err := ValidateLock(lock); err != nil {
return nil, err
}
return &lock, nil
}
// SerializeLock serializes a validated Lock deterministically.
func SerializeLock(l *Lock) ([]byte, error) {
if l == nil {
return nil, NewStateError(LockFile, ErrMalformed, "document", "lock is nil")
}
if err := ValidateLock(*l); err != nil {
return nil, err
}
data, err := l.Marshal()
if err != nil {
return nil, malformedStateError(LockFile, "document", err)
}
return data, nil
}
func marshalConfigDeterministic(c *Config) []byte {
var buf bytes.Buffer
buf.WriteString("[meta]\n")
buf.WriteString("format_version = ")
buf.WriteString(strconv.Quote(c.Meta.FormatVersion))
buf.WriteString("\n\n")
buf.WriteString("[sources]\n")
buf.WriteString("priority = ")
buf.WriteString(formatStringArray(c.Sources.Priority))
buf.WriteString("\n")
buf.WriteString("preferred = ")
buf.WriteString(strconv.Quote(c.Sources.Preferred))
buf.WriteString("\n")
buf.WriteString("allow_custom = ")
buf.WriteString(strconv.FormatBool(c.Sources.AllowCustom))
buf.WriteString("\n\n")
buf.WriteString("[upgrade]\n")
buf.WriteString("mode = ")
buf.WriteString(strconv.Quote(c.Upgrade.Mode))
buf.WriteString("\n")
buf.WriteString("allow_major_bumps = ")
buf.WriteString(strconv.FormatBool(c.Upgrade.AllowMajorBumps))
buf.WriteString("\n\n")
buf.WriteString("[scope]\n")
buf.WriteString("managed_roots = ")
buf.WriteString(formatStringArray(c.Scope.ManagedRoots))
buf.WriteString("\n")
buf.WriteString("unmanaged_paths = ")
buf.WriteString(formatStringArray(c.Scope.UnmanagedPaths))
buf.WriteString("\n")
buf.WriteString("preserve_on_remove = ")
buf.WriteString(formatStringArray(c.Scope.PreserveOnRemove))
buf.WriteString("\n\n")
buf.WriteString("[optional]\n")
buf.WriteString("include_optional = ")
buf.WriteString(strconv.FormatBool(c.Optional.IncludeOptional))
buf.WriteString("\n")
buf.WriteString("client_mods = ")
buf.WriteString(strconv.FormatBool(c.Optional.ClientMods))
buf.WriteString("\n\n")
buf.WriteString("[output]\n")
buf.WriteString("no_style = ")
buf.WriteString(strconv.FormatBool(c.Output.NoStyle))
buf.WriteString("\n")
buf.WriteString("json = ")
buf.WriteString(strconv.FormatBool(c.Output.JSON))
buf.WriteString("\n")
return buf.Bytes()
}
func formatStringArray(values []string) string {
quoted := make([]string, 0, len(values))
for _, value := range values {
quoted = append(quoted, strconv.Quote(value))
}
return "[" + joinQuoted(quoted) + "]"
}
func joinQuoted(values []string) string {
if len(values) == 0 {
return ""
}
var buf bytes.Buffer
for i, value := range values {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(value)
}
return buf.String()
}
package state
import (
"slices"
"strings"
)
type StateDiff struct {
InManifestNotLock []string
InLockNotManifest []string
InLockNotObserved []string
InObservedNotLock []string
IgnoredObserved []string
UnmanagedObserved []string
}
// DiffDesiredResolved compares desired membership with resolved membership.
//
// It intentionally compares package identity only. Manifest versions may remain
// fuzzy intent selectors, while lock versions are exact facts. Exact-version
// drift for the same package ID is tracked by lock staleness
// (manifest_fingerprint mismatch) and the next resolve/install run, not by this
// membership diff.
func DiffDesiredResolved(manifest *Manifest, lock *Lock) StateDiff {
diff := StateDiff{}
manifestIDs := make(map[string]struct{})
if manifest != nil {
for _, pkg := range manifest.Packages {
if pkg.ID == "" {
continue
}
manifestIDs[pkg.ID] = struct{}{}
}
}
lockIDs := make(map[string]struct{})
if lock != nil {
for _, pkg := range lock.Packages {
if pkg.ID == "" {
continue
}
lockIDs[pkg.ID] = struct{}{}
}
}
for id := range manifestIDs {
if _, ok := lockIDs[id]; !ok {
diff.InManifestNotLock = append(diff.InManifestNotLock, id)
}
}
for id := range lockIDs {
if _, ok := manifestIDs[id]; !ok {
diff.InLockNotManifest = append(diff.InLockNotManifest, id)
}
}
slices.Sort(diff.InManifestNotLock)
slices.Sort(diff.InLockNotManifest)
return diff
}
// DiffResolvedObserved compares exact lock install targets with current
// observed paths. Observed drift is always checked against lock facts, never
// against fuzzy manifest selectors.
func DiffResolvedObserved(lock *Lock, observedPaths []string) StateDiff {
return DiffResolvedObservedInScope(lock, observedPaths, NewManagedScope(nil, nil), nil)
}
// DiffResolvedObservedInScope compares exact lock install targets with observed
// paths while separating Lucy-managed drift from ignored/manual content and
// content outside managed sync scope.
func DiffResolvedObservedInScope(lock *Lock, observedPaths []string, scope ManagedScope, ignoredPaths []string) StateDiff {
diff := StateDiff{}
lockPaths := make(map[string]struct{})
if lock != nil {
for _, pkg := range lock.Packages {
normalized := normalizeRelativePath(pkg.InstallPath)
if normalized == "" || normalized == "." {
continue
}
lockPaths[normalized] = struct{}{}
}
}
ignored := make(map[string]struct{}, len(ignoredPaths))
for _, path := range ignoredPaths {
normalized := normalizeRelativePath(path)
if normalized == "" || normalized == "." {
continue
}
ignored[normalized] = struct{}{}
}
observed := make(map[string]struct{}, len(observedPaths))
for _, path := range observedPaths {
normalized := normalizeRelativePath(path)
if normalized == "" || normalized == "." {
continue
}
observed[normalized] = struct{}{}
}
for path := range lockPaths {
if _, ok := observed[path]; !ok {
diff.InLockNotObserved = append(diff.InLockNotObserved, path)
}
}
for path := range observed {
if _, ok := lockPaths[path]; ok {
continue
}
if _, ok := ignored[path]; ok {
diff.IgnoredObserved = append(diff.IgnoredObserved, path)
continue
}
if !IsManaged(scope, path) {
diff.UnmanagedObserved = append(diff.UnmanagedObserved, path)
continue
}
diff.InObservedNotLock = append(diff.InObservedNotLock, path)
}
slices.Sort(diff.InLockNotObserved)
slices.Sort(diff.InObservedNotLock)
slices.Sort(diff.IgnoredObserved)
slices.Sort(diff.UnmanagedObserved)
return diff
}
// IgnoredInstallPaths resolves manifest ignored entries to their known install
// paths from the lock so observed files can stay visible without being treated
// as managed drift.
func IgnoredInstallPaths(manifest *Manifest, lock *Lock) []string {
if manifest == nil || lock == nil {
return nil
}
ignoredIDs := make(map[string]struct{})
for _, pkg := range manifest.Packages {
if pkg.Role != RoleIgnored || strings.TrimSpace(pkg.ID) == "" {
continue
}
ignoredIDs[pkg.ID] = struct{}{}
}
paths := make([]string, 0, len(ignoredIDs))
for _, pkg := range lock.Packages {
if _, ok := ignoredIDs[pkg.ID]; !ok {
continue
}
normalized := normalizeRelativePath(pkg.InstallPath)
if normalized == "" || normalized == "." {
continue
}
paths = append(paths, normalized)
}
slices.Sort(paths)
return slices.Compact(paths)
}
// CompareManifestLockObserved combines intent-vs-lock and lock-vs-observed
// comparisons under Lucy's softer manifest model.
func CompareManifestLockObserved(manifest *Manifest, lock *Lock, observedPaths []string) StateDiff {
managedManifest := manifestForComparison(manifest)
managedLock := lockForComparison(manifest, lock)
scope := NewManagedScope(nil, nil)
intentDiff := DiffDesiredResolved(managedManifest, managedLock)
observedDiff := DiffResolvedObservedInScope(managedLock, observedPaths, scope, IgnoredInstallPaths(manifest, lock))
return StateDiff{
InManifestNotLock: intentDiff.InManifestNotLock,
InLockNotManifest: intentDiff.InLockNotManifest,
InLockNotObserved: observedDiff.InLockNotObserved,
InObservedNotLock: observedDiff.InObservedNotLock,
IgnoredObserved: observedDiff.IgnoredObserved,
UnmanagedObserved: observedDiff.UnmanagedObserved,
}
}
func manifestForComparison(manifest *Manifest) *Manifest {
if manifest == nil {
return nil
}
cloned := *manifest
cloned.Packages = make([]ManifestPackage, 0, len(manifest.Packages))
for _, pkg := range manifest.Packages {
if pkg.Role == RoleIgnored {
continue
}
cloned.Packages = append(cloned.Packages, pkg)
}
return &cloned
}
func lockForComparison(manifest *Manifest, lock *Lock) *Lock {
if lock == nil {
return nil
}
ignoredIDs := make(map[string]struct{})
if manifest != nil {
for _, pkg := range manifest.Packages {
if pkg.Role != RoleIgnored || strings.TrimSpace(pkg.ID) == "" {
continue
}
ignoredIDs[pkg.ID] = struct{}{}
}
}
filtered := *lock
filtered.Packages = make([]LockedPackage, 0, len(lock.Packages))
for _, pkg := range lock.Packages {
if _, ok := ignoredIDs[pkg.ID]; ok {
continue
}
filtered.Packages = append(filtered.Packages, pkg)
}
filtered.Packages = CanonicalLockedPackages(filtered.Packages)
return &filtered
}
func ClassifyDrift(diff StateDiff) string {
parts := make([]string, 0, 4)
if len(diff.InManifestNotLock) > 0 {
parts = append(parts, "unresolved intent")
}
if len(diff.InLockNotManifest) > 0 {
parts = append(parts, "stale lock facts")
}
if len(diff.InLockNotObserved) > 0 || len(diff.InObservedNotLock) > 0 {
parts = append(parts, "runtime drift")
}
if len(diff.IgnoredObserved) > 0 || len(diff.UnmanagedObserved) > 0 {
parts = append(parts, "ignored/manual content")
}
if len(parts) == 0 {
return "in sync"
}
return "has " + joinDiagnosticParts(parts)
}
func joinDiagnosticParts(parts []string) string {
if len(parts) == 0 {
return ""
}
if len(parts) == 1 {
return parts[0]
}
if len(parts) == 2 {
return parts[0] + " and " + parts[1]
}
return strings.Join(parts[:len(parts)-1], ", ") + ", and " + parts[len(parts)-1]
}
package state
import (
"bytes"
"fmt"
"strconv"
"strings"
"github.com/pelletier/go-toml"
)
// Config represents the policy and defaults for a Lucy project.
// It is persisted in .lucy/config.toml and controls source selection,
// upgrade behavior, scope boundaries, and output formatting.
//
// Config OWNS: policy.defaults, policy.source-selection, policy.safety
// Config MUST NOT own: intent.direct-roots, resolution.graph, artifact.hashes,
// artifact.download-urls
type Config struct {
// Meta contains format metadata.
Meta MetaConfig `toml:"meta"`
// Sources defines source selection and priority rules.
Sources SourcesConfig `toml:"sources"`
// Upgrade defines version resolution and upgrade policies.
Upgrade UpgradeConfig `toml:"upgrade"`
// Scope defines managed and unmanaged path boundaries.
Scope ScopeConfig `toml:"scope"`
// Optional defines optional package handling.
Optional OptionalConfig `toml:"optional"`
// Output defines CLI output formatting.
Output OutputConfig `toml:"output"`
}
// MetaConfig contains format metadata for the config file.
type MetaConfig struct {
// FormatVersion specifies the config file format version.
// Use "v1" for the current schema.
FormatVersion string `toml:"format_version"`
}
// SourcesConfig defines source selection and priority rules.
type SourcesConfig struct {
// Priority defines the ordered list of sources to try when resolving
// packages. Earlier sources have higher priority.
Priority []string `toml:"priority"`
// Preferred defines the default source when SourceAuto is specified.
// Valid values: "auto", "modrinth", "curseforge", "github", "mcdr"
Preferred string `toml:"preferred"`
// AllowCustom enables custom source URLs (e.g., direct git or http URLs).
AllowCustom bool `toml:"allow_custom"`
}
// UpgradeConfig defines version resolution and upgrade policies.
type UpgradeConfig struct {
// Mode defines the version resolution strategy.
// "compatible" - use compatible version (default)
// "latest" - use latest version
// "pinned" - use exactly specified version
Mode string `toml:"mode"`
// AllowMajorBumps enables major version upgrades.
AllowMajorBumps bool `toml:"allow_major_bumps"`
}
// ScopeConfig defines managed and unmanaged path boundaries.
type ScopeConfig struct {
// ManagedRoots specifies the list of relative paths that Lucy manages.
// These directories are where Lucy installs and tracks packages.
ManagedRoots []string `toml:"managed_roots"`
// UnmanagedPaths is a list of glob patterns to exclude from drift detection.
// Files matching these patterns are ignored by status/drift commands.
UnmanagedPaths []string `toml:"unmanaged_paths"`
// PreserveOnRemove lists glob patterns for files to preserve when packages
// are removed. These files will not be deleted during cleanup operations.
PreserveOnRemove []string `toml:"preserve_on_remove"`
}
// OptionalConfig defines optional package handling.
type OptionalConfig struct {
// IncludeOptional controls whether optional dependencies are included.
IncludeOptional bool `toml:"include_optional"`
// ClientMods controls whether client-side only mods are included.
ClientMods bool `toml:"client_mods"`
}
// OutputConfig defines CLI output formatting.
type OutputConfig struct {
// NoStyle disables colored and styled output.
NoStyle bool `toml:"no_style"`
// JSON enables JSON output format.
JSON bool `toml:"json"`
}
// ConfigDefaults returns a Config value with default settings.
func ConfigDefaults() Config {
return Config{
Meta: MetaConfig{
FormatVersion: SupportedVersion,
},
Sources: SourcesConfig{
Priority: []string{"modrinth", "curseforge", "github", "mcdr"},
Preferred: "auto",
AllowCustom: false,
},
Upgrade: UpgradeConfig{
Mode: "compatible",
AllowMajorBumps: false,
},
Scope: ScopeConfig{
ManagedRoots: []string{"mods", "plugins", "config"},
UnmanagedPaths: []string{},
PreserveOnRemove: []string{"config/**"},
},
Optional: OptionalConfig{
IncludeOptional: false,
ClientMods: false,
},
Output: OutputConfig{
NoStyle: false,
JSON: false,
},
}
}
// ValidateConfig validates the config and returns an error if any fields
// outside the policy domain are present.
//
// Config schema enforces that only policy fields are present, so this
// primarily serves as a safeguard against future schema drift.
func ValidateConfig(c Config) error {
// Check that format_version is set and valid
if err := ValidateVersion(c.Meta.FormatVersion); err != nil {
if IsVersionError(err) {
return versionStateError(ConfigFile, "meta.format_version", c.Meta.FormatVersion, ErrVersionUnsupported)
}
return versionStateError(ConfigFile, "meta.format_version", c.Meta.FormatVersion, ErrMalformed)
}
// Validate source priority contains valid source names
validSources := map[string]bool{
"modrinth": true,
"curseforge": true,
"github": true,
"mcdr": true,
}
for _, src := range c.Sources.Priority {
if !validSources[src] {
return NewStateError(ConfigFile, ErrMalformed, "sources.priority", fmt.Sprintf("invalid source %q in priority list", src))
}
}
// Validate preferred source
if c.Sources.Preferred != "auto" && !validSources[c.Sources.Preferred] {
return NewStateError(ConfigFile, ErrMalformed, "sources.preferred", fmt.Sprintf("invalid preferred source %q", c.Sources.Preferred))
}
// Validate upgrade mode
validModes := map[string]bool{
"compatible": true,
"latest": true,
"pinned": true,
}
if !validModes[c.Upgrade.Mode] {
return NewStateError(ConfigFile, ErrMalformed, "upgrade.mode", fmt.Sprintf("invalid upgrade mode %q", c.Upgrade.Mode))
}
// Config MUST NOT own these - they belong to manifest/lock
// This is enforced by the struct definition itself, but we add
// a runtime check as a safeguard
if c.Meta.FormatVersion == "" {
// This should never happen due to type system, but kept as documentation
return NewStateError(ConfigFile, ErrBoundaryViolation, "meta.format_version", "reserved field detected - schema enforces policy domain only")
}
return nil
}
func (c Config) Marshal() ([]byte, error) {
var buf bytes.Buffer
writeTomlSectionHeader(&buf, "meta")
writeTomlStringField(&buf, "format_version", c.Meta.FormatVersion)
buf.WriteString("\n")
writeTomlSectionHeader(&buf, "sources")
writeTomlStringSliceField(&buf, "priority", c.Sources.Priority)
writeTomlStringField(&buf, "preferred", c.Sources.Preferred)
writeTomlBoolField(&buf, "allow_custom", c.Sources.AllowCustom)
buf.WriteString("\n")
writeTomlSectionHeader(&buf, "upgrade")
writeTomlStringField(&buf, "mode", c.Upgrade.Mode)
writeTomlBoolField(&buf, "allow_major_bumps", c.Upgrade.AllowMajorBumps)
buf.WriteString("\n")
writeTomlSectionHeader(&buf, "scope")
writeTomlStringSliceField(&buf, "managed_roots", c.Scope.ManagedRoots)
writeTomlStringSliceField(&buf, "unmanaged_paths", c.Scope.UnmanagedPaths)
writeTomlStringSliceField(&buf, "preserve_on_remove", c.Scope.PreserveOnRemove)
buf.WriteString("\n")
writeTomlSectionHeader(&buf, "optional")
writeTomlBoolField(&buf, "include_optional", c.Optional.IncludeOptional)
writeTomlBoolField(&buf, "client_mods", c.Optional.ClientMods)
buf.WriteString("\n")
writeTomlSectionHeader(&buf, "output")
writeTomlBoolField(&buf, "no_style", c.Output.NoStyle)
writeTomlBoolField(&buf, "json", c.Output.JSON)
return buf.Bytes(), nil
}
func (c *Config) Unmarshal(data []byte) error {
return toml.Unmarshal(data, c)
}
func writeTomlSectionHeader(buf *bytes.Buffer, name string) {
buf.WriteString("[")
buf.WriteString(name)
buf.WriteString("]\n")
}
func writeTomlArrayTableHeader(buf *bytes.Buffer, name string) {
buf.WriteString("[[")
buf.WriteString(name)
buf.WriteString("]]\n")
}
func writeTomlStringField(buf *bytes.Buffer, key, value string) {
buf.WriteString(key)
buf.WriteString(" = ")
buf.WriteString(strconv.Quote(value))
buf.WriteString("\n")
}
func writeTomlBoolField(buf *bytes.Buffer, key string, value bool) {
buf.WriteString(key)
buf.WriteString(" = ")
buf.WriteString(strconv.FormatBool(value))
buf.WriteString("\n")
}
func writeTomlStringSliceField(buf *bytes.Buffer, key string, values []string) {
quoted := make([]string, 0, len(values))
for _, value := range values {
quoted = append(quoted, strconv.Quote(value))
}
buf.WriteString(key)
buf.WriteString(" = [")
buf.WriteString(strings.Join(quoted, ", "))
buf.WriteString("]\n")
}
package state
// StateLayer identifies which conceptual state layer a fact belongs to.
// DesiredState expresses user intent, ResolvedState expresses Lucy's chosen
// exact closure, and ObservedState expresses live facts discovered from the
// environment.
type StateLayer string
const (
// DesiredState is the intent layer. It answers "what should Lucy manage for
// this project?" and is persisted primarily in .lucy/manifest.toml, with
// policy modifiers sourced from .lucy/config.toml.
DesiredState StateLayer = "desired"
// ResolvedState is the fully chosen closure. It answers "what exact graph,
// artifact identity, and provenance did Lucy resolve for the desired state?"
// and is persisted in .lucy/lock.json.
ResolvedState StateLayer = "resolved"
// ObservedState is the probe layer. It answers "what does the current working
// directory actually contain right now?" and stays outside the persistent
// state files in v1.
ObservedState StateLayer = "observed"
)
// StateFile identifies one persistent Lucy state file.
type StateFile string
const (
// ConfigFile stores policy and defaults for this project. It may include
// operator preferences, source or safety defaults, and command behavior
// settings, but it must not declare desired package roots, exact artifact
// hashes, download URLs, or observed runtime facts.
ConfigFile StateFile = ".lucy/config.toml"
// ManifestFile stores desired environment intent as JSON. It owns direct
// roots, managed-scope declarations, and other descriptive statements about
// what the project wants Lucy to converge toward. It must not contain
// lockfile-only fields such as exact transitive closures, hashes, or exact
// download URLs.
ManifestFile StateFile = ".lucy/manifest.json"
// LockFile stores the exact resolved graph and provenance for a manifest. It
// owns exact versions, chosen sources, artifact identity, provenance chains,
// and other reproducibility data. It must not become a dump of live probe
// facts, user policy defaults, or procedural orchestration state.
LockFile StateFile = ".lucy/lock.json"
)
// ExplicitOwnership maps a field class to the file that owns it.
//
// Field classes are intentionally coarse-grained. They define boundary classes
// such as "policy.defaults" or "artifact.hashes" rather than concrete schema
// fields, so the ownership contract can exist before v1 codecs and structs do.
type ExplicitOwnership map[string]StateFile
// DefaultOwnership returns the v1 Option C ownership contract.
func DefaultOwnership() ExplicitOwnership {
return ExplicitOwnership{
"policy.defaults": ConfigFile,
"policy.source-selection": ConfigFile,
"policy.safety": ConfigFile,
"intent.direct-roots": ManifestFile,
"intent.managed-scope": ManifestFile,
"intent.environment": ManifestFile,
"resolution.graph": LockFile,
"resolution.provenance": LockFile,
"artifact.hashes": LockFile,
"artifact.download-urls": LockFile,
}
}
// OwnerOf reports which file owns a field class under the default v1 contract.
func OwnerOf(fieldClass string) (StateFile, bool) {
owner, ok := DefaultOwnership()[fieldClass]
return owner, ok
}
package state
import (
"errors"
"fmt"
"strings"
)
type ErrorKind string
const (
ErrMalformed ErrorKind = "malformed"
ErrBoundaryViolation ErrorKind = "boundary_violation"
ErrVersionUnsupported ErrorKind = "version_unsupported"
ErrIOFailure ErrorKind = "io_failure"
)
type StateError struct {
File StateFile
Kind ErrorKind
Field string
Msg string
}
func NewStateError(file StateFile, kind ErrorKind, field, msg string) StateError {
return StateError{File: file, Kind: kind, Field: field, Msg: msg}
}
func (e StateError) Error() string {
parts := make([]string, 0, 4)
if e.File != "" {
parts = append(parts, string(e.File))
}
if e.Field != "" {
parts = append(parts, e.Field)
}
if e.Kind != "" {
parts = append(parts, string(e.Kind))
}
if e.Msg != "" {
parts = append(parts, e.Msg)
}
if len(parts) == 0 {
return "state error"
}
return strings.Join(parts, ": ")
}
func IsVersionError(err error) bool {
var stateErr StateError
return errors.As(err, &stateErr) && stateErr.Kind == ErrVersionUnsupported
}
func malformedStateError(file StateFile, field string, err error) error {
if err == nil {
return nil
}
return NewStateError(file, ErrMalformed, field, err.Error())
}
func versionStateError(file StateFile, field string, version string, kind ErrorKind) error {
if kind == ErrMalformed {
return NewStateError(file, kind, field, "version is required")
}
return NewStateError(file, kind, field, fmt.Sprintf("unsupported version %q; supported version is %q", version, SupportedVersion))
}
func ioStateError(file StateFile, field, msg string, err error) error {
if err == nil {
return NewStateError(file, ErrIOFailure, field, msg)
}
return NewStateError(file, ErrIOFailure, field, fmt.Sprintf("%s: %v", msg, err))
}
package state
import (
"fmt"
"os"
"path/filepath"
)
// AtomicWrite writes data to path via a temp file in the same directory and
// atomically renames it into place.
func AtomicWrite(path string, data []byte, perm os.FileMode) (err error) {
dir := filepath.Dir(path)
temp, err := os.CreateTemp(dir, filepath.Base(path)+"-*.tmp")
if err != nil {
return fmt.Errorf("atomic write %s: create temp file: %w", path, err)
}
tempPath := temp.Name()
defer func() {
if err != nil {
_ = os.Remove(tempPath)
}
}()
if _, err = temp.Write(data); err != nil {
_ = temp.Close()
return fmt.Errorf("atomic write %s: write temp file: %w", path, err)
}
if err = temp.Chmod(perm); err != nil {
_ = temp.Close()
return fmt.Errorf("atomic write %s: chmod temp file: %w", path, err)
}
if err = temp.Close(); err != nil {
return fmt.Errorf("atomic write %s: close temp file: %w", path, err)
}
if err = os.Rename(tempPath, path); err != nil {
return fmt.Errorf("atomic write %s: rename temp file: %w", path, err)
}
return nil
}
// SafeRead reads a file, treating a missing file as a non-error.
func SafeRead(path string) ([]byte, bool, error) {
data, err := os.ReadFile(path)
if err == nil {
return data, true, nil
}
if os.IsNotExist(err) {
return nil, false, nil
}
return nil, false, fmt.Errorf("read %s: %w", path, err)
}
// EnsureDir creates path and parents if they do not exist.
func EnsureDir(path string) error {
if err := os.MkdirAll(path, 0o755); err != nil {
return fmt.Errorf("ensure dir %s: %w", path, err)
}
return nil
}
// ReadConfig reads .lucy/config.toml from workDir if present.
func ReadConfig(workDir string) (*Config, bool, error) {
path := filepath.Join(workDir, string(ConfigFile))
data, ok, err := SafeRead(path)
if err != nil || !ok {
return nil, ok, err
}
config, err := ParseConfig(data)
if err != nil {
return nil, false, err
}
return config, true, nil
}
// ReadManifest reads .lucy/manifest.json from workDir if present.
// Manifest is the intent layer, including fuzzy versions and compatible-platform hints.
func ReadManifest(workDir string) (*Manifest, bool, error) {
path := filepath.Join(workDir, string(ManifestFile))
data, ok, err := SafeRead(path)
if err != nil || !ok {
return nil, ok, err
}
manifest, err := ParseManifest(data)
if err != nil {
return nil, false, err
}
return manifest, true, nil
}
// ReadLock reads .lucy/lock.json from workDir if present.
// Lock is the exact fact layer for one resolved environment snapshot.
func ReadLock(workDir string) (*Lock, bool, error) {
path := filepath.Join(workDir, string(LockFile))
data, ok, err := SafeRead(path)
if err != nil || !ok {
return nil, ok, err
}
lock, err := ParseLock(data)
if err != nil {
return nil, false, err
}
return lock, true, nil
}
// WriteConfig writes .lucy/config.toml atomically.
func WriteConfig(workDir string, c *Config) error {
data, err := SerializeConfig(c)
if err != nil {
return err
}
dir := filepath.Join(workDir, ".lucy")
if err := EnsureDir(dir); err != nil {
return err
}
return AtomicWrite(filepath.Join(workDir, string(ConfigFile)), data, 0o600)
}
// WriteManifest writes .lucy/manifest.json atomically.
// It preserves fuzzy intent instead of rewriting it to exact lock facts.
func WriteManifest(workDir string, m *Manifest) error {
data, err := SerializeManifest(m)
if err != nil {
return err
}
dir := filepath.Join(workDir, ".lucy")
if err := EnsureDir(dir); err != nil {
return err
}
return AtomicWrite(filepath.Join(workDir, string(ManifestFile)), data, 0o600)
}
// WriteLock writes .lucy/lock.json atomically.
// It persists exact resolved environment and package facts.
func WriteLock(workDir string, l *Lock) error {
data, err := SerializeLock(l)
if err != nil {
return err
}
dir := filepath.Join(workDir, ".lucy")
if err := EnsureDir(dir); err != nil {
return err
}
return AtomicWrite(filepath.Join(workDir, string(LockFile)), data, 0o600)
}
package state
import (
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"github.com/mclucy/lucy/types"
)
// Lock represents Lucy's exact resolved state snapshot.
// It is persisted in .lucy/lock.json and owns resolution.graph,
// resolution.provenance, artifact.hashes, and artifact.download-urls.
//
// Lock MUST NOT own policy defaults, desired roots, or observed runtime state.
type Lock struct {
Version string `json:"version"`
GeneratedAt string `json:"generated_at"`
// ManifestFingerprint binds the exact lock facts to one serialized manifest
// intent document. If the manifest bytes change, the lock is stale even when
// package IDs still overlap.
ManifestFingerprint string `json:"manifest_fingerprint"`
GameVersion string `json:"game_version"`
Platform string `json:"platform"`
PlatformVersion string `json:"platform_version"`
Packages []LockedPackage `json:"packages"`
Bundles []LockedBundle `json:"bundles"`
}
// LockedPackage records one exact resolved artifact and how it entered the
// resolved graph.
type LockedPackage struct {
ID string `json:"id"`
// Version is the final concrete version chosen for this resolved artifact.
// Lock entries are fact records, so fuzzy selectors and ranges are invalid
// here even when the manifest used them as intent.
Version string `json:"version"`
Source string `json:"source"`
URL string `json:"url"`
Filename string `json:"filename"`
Hash string `json:"hash"`
HashAlgorithm string `json:"hash_algorithm"`
InstallPath string `json:"install_path"`
Side string `json:"side"`
Optional bool `json:"optional"`
Embedded bool `json:"embedded"`
EmbeddedIn string `json:"embedded_in,omitempty"`
Provenance []string `json:"provenance,omitempty"`
Requester string `json:"requester"`
}
// LockedBundle records one non-package managed artifact bundle tracked in the
// resolved state.
type LockedBundle struct {
Name string `json:"name"`
Type string `json:"type"`
Hash string `json:"hash"`
InstallPath string `json:"install_path"`
}
// NewLock returns a new v1 lock with the current timestamp in RFC3339 format.
func NewLock() Lock {
return Lock{
Version: SupportedVersion,
GeneratedAt: time.Now().UTC().Format(time.RFC3339),
Packages: []LockedPackage{},
Bundles: []LockedBundle{},
}
}
// ValidateLock validates required fields and v1 resolved-state invariants.
func ValidateLock(l Lock) error {
if err := ValidateVersion(l.Version); err != nil {
if IsVersionError(err) {
return versionStateError(LockFile, "version", l.Version, ErrVersionUnsupported)
}
return versionStateError(LockFile, "version", l.Version, ErrMalformed)
}
if l.GeneratedAt == "" {
return NewStateError(LockFile, ErrMalformed, "generated_at", "generated_at is required")
}
if _, err := time.Parse(time.RFC3339, l.GeneratedAt); err != nil {
return NewStateError(LockFile, ErrMalformed, "generated_at", fmt.Sprintf("generated_at must be RFC3339: %v", err))
}
if l.ManifestFingerprint == "" {
return NewStateError(LockFile, ErrMalformed, "manifest_fingerprint", "manifest_fingerprint is required")
}
if l.GameVersion == "" {
return NewStateError(LockFile, ErrMalformed, "game_version", "game_version is required")
}
if l.Platform == "" {
return NewStateError(LockFile, ErrMalformed, "platform", "platform is required")
}
if err := validateManifestPlatform(l.Platform); err != nil {
return NewStateError(LockFile, ErrMalformed, "platform", err.Error())
}
if l.PlatformVersion == "" {
return NewStateError(LockFile, ErrMalformed, "platform_version", "platform_version is required")
}
for i, pkg := range l.Packages {
if err := validateLockedPackage(pkg); err != nil {
return malformedStateError(LockFile, fmt.Sprintf("packages[%d]", i), err)
}
}
for i, bundle := range l.Bundles {
if err := validateLockedBundle(bundle); err != nil {
return malformedStateError(LockFile, fmt.Sprintf("bundles[%d]", i), err)
}
}
return nil
}
func (l Lock) Marshal() ([]byte, error) {
return json.MarshalIndent(l, "", " ")
}
func (l *Lock) Unmarshal(data []byte) error {
return json.Unmarshal(data, l)
}
func validateLockedPackage(pkg LockedPackage) error {
if pkg.ID == "" {
return fmt.Errorf("id is required")
}
parts := strings.Split(pkg.ID, "/")
if len(parts) != 2 || strings.TrimSpace(parts[0]) == "" || strings.TrimSpace(parts[1]) == "" {
return fmt.Errorf("id must use platform/name format")
}
platform := types.Platform(parts[0])
if !platform.Valid() || platform == types.PlatformAny || platform == types.PlatformMinecraft || platform == types.PlatformUnknown {
return fmt.Errorf("invalid package platform %q", parts[0])
}
if pkg.Version == "" {
return fmt.Errorf("version is required")
}
if !isExactLockVersion(pkg.Version) {
return fmt.Errorf("version must be exact, got %q", pkg.Version)
}
if !isValidLockSource(pkg.Source) {
return fmt.Errorf("invalid source %q", pkg.Source)
}
if pkg.URL == "" {
return fmt.Errorf("url is required")
}
if pkg.Filename == "" {
return fmt.Errorf("filename is required")
}
if pkg.Hash == "" {
return fmt.Errorf("hash is required")
}
if !isValidHashAlgorithm(pkg.HashAlgorithm) {
return fmt.Errorf("invalid hash_algorithm %q", pkg.HashAlgorithm)
}
if pkg.InstallPath == "" {
return fmt.Errorf("install_path is required")
}
if !isValidPackageSide(pkg.Side) {
return fmt.Errorf("invalid side %q", pkg.Side)
}
if len(pkg.Provenance) == 0 {
return fmt.Errorf("provenance is required")
}
for i, step := range pkg.Provenance {
if step == "" {
return fmt.Errorf("provenance[%d] is required", i)
}
}
if pkg.Requester == "" {
return fmt.Errorf("requester is required")
}
if pkg.Embedded {
if pkg.EmbeddedIn == "" {
return fmt.Errorf("embedded_in is required when embedded is true")
}
} else if pkg.EmbeddedIn != "" {
return fmt.Errorf("embedded_in must be empty when embedded is false")
}
return nil
}
func validateLockedBundle(bundle LockedBundle) error {
if bundle.Name == "" {
return fmt.Errorf("name is required")
}
if bundle.Type == "" {
return fmt.Errorf("type is required")
}
if bundle.Hash == "" {
return fmt.Errorf("hash is required")
}
if bundle.InstallPath == "" {
return fmt.Errorf("install_path is required")
}
return nil
}
func isExactLockVersion(version string) bool {
if isSpecialLockVersion(version) {
return false
}
for _, token := range []string{" ", "\t", "\n", "\r", ",", "||", "*", "^", "~", ">", "<", "=", "[", "]", "(", ")"} {
if strings.Contains(version, token) {
return false
}
}
for _, token := range []string{".x", ".X", "-x", "-X", "x.", "X."} {
if strings.Contains(version, token) {
return false
}
}
if strings.EqualFold(version, "x") {
return false
}
return true
}
func isSpecialLockVersion(version string) bool {
switch version {
case "any", "none", "unknown", "latest", "compatible":
return true
default:
return false
}
}
func isValidLockSource(source string) bool {
switch source {
case "modrinth", "curseforge", "github", "mcdr", "direct":
return true
default:
return false
}
}
func isValidHashAlgorithm(algorithm string) bool {
switch algorithm {
case "sha512", "sha1":
return true
default:
return false
}
}
func isValidPackageSide(side string) bool {
switch side {
case "server", "client", "both":
return true
default:
return false
}
}
func CanonicalLockedPackages(packages []LockedPackage) []LockedPackage {
canonical := append([]LockedPackage(nil), packages...)
sort.Slice(canonical, func(i, j int) bool {
if canonical[i].ID != canonical[j].ID {
return canonical[i].ID < canonical[j].ID
}
if canonical[i].Version != canonical[j].Version {
return canonical[i].Version < canonical[j].Version
}
return canonical[i].InstallPath < canonical[j].InstallPath
})
return canonical
}
func PruneLockForManifest(lock *Lock, manifest *Manifest) *Lock {
if lock == nil {
return nil
}
pruned := *lock
pruned.Bundles = append([]LockedBundle(nil), lock.Bundles...)
allowed := make(map[string]struct{})
if manifest != nil {
for _, pkg := range manifest.Packages {
if pkg.Role == RoleIgnored {
continue
}
allowed[pkg.ID] = struct{}{}
}
}
packages := make([]LockedPackage, 0, len(lock.Packages))
for _, pkg := range lock.Packages {
if _, ok := allowed[pkg.ID]; ok {
packages = append(packages, pkg)
}
}
pruned.Packages = CanonicalLockedPackages(packages)
return &pruned
}
package state
import (
"encoding/json"
"fmt"
"sort"
"strings"
"github.com/mclucy/lucy/types"
)
// Manifest stores the desired environment intent for a Lucy project.
// It is persisted in .lucy/manifest.json.
//
// Manifest OWNS: intent.direct-roots, intent.managed-scope, intent.environment
// Manifest MUST NOT own: resolution.graph, artifact.hashes,
// artifact.download-urls
type Manifest struct {
FormatVersion string `json:"format_version"`
Environment ManifestEnvironment `json:"environment"`
Packages []ManifestPackage `json:"packages"`
Bundles []ManifestBundle `json:"bundles"`
}
type ManifestEnvironment struct {
GameVersion string `json:"game_version"`
ServerCore string `json:"server_core"`
ServerCoreVersion string `json:"server_core_version"`
ModdingPlatform string `json:"modding_platform"`
ModdingPlatformVersion string `json:"modding_platform_version"`
CompatiblePlatforms []string `json:"compatible_platforms"`
Mcdr bool `json:"mcdr"`
DeclaredCapabilities []string `json:"declared_capabilities"`
}
type ManifestSide string
const (
SideServer ManifestSide = "server"
SideClient ManifestSide = "client"
SideBoth ManifestSide = "both"
SideUnknown ManifestSide = "unknown"
)
type ManifestPackage struct {
ID string `json:"id"`
// Version stores version intent exactly as written in the manifest.
//
// It may be an exact version or a fuzzy selector such as "latest",
// "compatible", or a future range/non-exact preference. The manifest is the
// intent layer, so Lucy must preserve this string verbatim instead of
// rewriting it to the currently resolved exact version.
Version string `json:"version"`
Source string `json:"source"`
// Role defines how Lucy should treat this package in desired state.
//
// - required: explicit operator intent, including user-selected leaf nodes during adopt
// - transitive: resolver-derived dependency Lucy may auto-prune when no longer needed
// - ignored: known content Lucy sees but must leave outside sync responsibility
//
// Non-leaf nodes remain visible to init/adopt users because Minecraft package
// boundaries are often fuzzy, but that visibility must not become a fourth role.
Role ManifestRole `json:"role"`
Side ManifestSide `json:"side"`
Optional bool `json:"optional"`
Pinned bool `json:"pinned"`
}
type ManifestRole string
const (
RoleRequired ManifestRole = "required"
RoleTransitive ManifestRole = "transitive"
RoleIgnored ManifestRole = "ignored"
)
type ClassifiedPackage struct {
ID string
Version string
Source string
Role ManifestRole
Side ManifestSide
Optional bool
Pinned bool
}
type BundleType string
const (
BundleTypeConfig BundleType = "config"
BundleTypeDatapack BundleType = "datapack"
BundleTypeResourcepack BundleType = "resourcepack"
BundleTypeKubeJS BundleType = "kubejs"
BundleTypeCustom BundleType = "custom"
)
type ManifestBundle struct {
Name string `json:"name"`
Type BundleType `json:"type"`
Path string `json:"path"`
Source string `json:"source"`
Optional bool `json:"optional"`
}
func ManifestDefaults() Manifest {
return Manifest{
FormatVersion: SupportedVersion,
Environment: ManifestEnvironment{
GameVersion: "",
ServerCore: "",
ServerCoreVersion: "",
ModdingPlatform: "",
ModdingPlatformVersion: "",
CompatiblePlatforms: []string{},
Mcdr: false,
DeclaredCapabilities: []string{},
},
Packages: []ManifestPackage{},
Bundles: []ManifestBundle{},
}
}
func ValidateManifest(m Manifest) error {
if err := ValidateVersion(m.FormatVersion); err != nil {
if IsVersionError(err) {
return versionStateError(ManifestFile, "format_version", m.FormatVersion, ErrVersionUnsupported)
}
return versionStateError(ManifestFile, "format_version", m.FormatVersion, ErrMalformed)
}
if err := ValidateManifestEnvironment(m.Environment); err != nil {
return err
}
for i, pkg := range m.Packages {
if err := validateManifestPackage(pkg); err != nil {
return malformedStateError(ManifestFile, fmt.Sprintf("packages[%d]", i), err)
}
}
for i, bundle := range m.Bundles {
if err := validateManifestBundle(bundle); err != nil {
return malformedStateError(ManifestFile, fmt.Sprintf("bundles[%d]", i), err)
}
}
return nil
}
func ValidateManifestEnvironment(env ManifestEnvironment) error {
platform := strings.TrimSpace(env.ModdingPlatform)
version := strings.TrimSpace(env.ModdingPlatformVersion)
if platform == "" {
if version != "" {
return NewStateError(ManifestFile, ErrMalformed, "environment.modding_platform_version", "environment.modding_platform_version requires environment.modding_platform")
}
if len(env.CompatiblePlatforms) > 0 {
return NewStateError(ManifestFile, ErrMalformed, "environment.compatible_platforms", "environment.compatible_platforms requires environment.modding_platform")
}
return nil
}
switch platform {
case "none", "fabric", "forge", "neoforge", "mcdr":
default:
return NewStateError(ManifestFile, ErrMalformed, "environment.modding_platform", fmt.Sprintf("invalid environment.modding_platform %q", env.ModdingPlatform))
}
if platform == "none" && len(env.CompatiblePlatforms) > 0 {
return NewStateError(ManifestFile, ErrMalformed, "environment.compatible_platforms", "environment.compatible_platforms requires a non-vanilla environment.modding_platform")
}
seen := make(map[string]struct{}, len(env.CompatiblePlatforms))
for i, raw := range env.CompatiblePlatforms {
value := strings.TrimSpace(raw)
if value == "" {
return NewStateError(ManifestFile, ErrMalformed, fmt.Sprintf("environment.compatible_platforms[%d]", i), "environment.compatible_platforms entries must be non-empty")
}
switch value {
case "fabric", "forge", "neoforge", "mcdr", "sinytra":
default:
return NewStateError(ManifestFile, ErrMalformed, fmt.Sprintf("environment.compatible_platforms[%d]", i), fmt.Sprintf("invalid compatible platform %q", raw))
}
if value == platform {
return NewStateError(ManifestFile, ErrMalformed, fmt.Sprintf("environment.compatible_platforms[%d]", i), fmt.Sprintf("compatible platform %q duplicates environment.modding_platform", raw))
}
if _, ok := seen[value]; ok {
return NewStateError(ManifestFile, ErrMalformed, fmt.Sprintf("environment.compatible_platforms[%d]", i), fmt.Sprintf("duplicate compatible platform %q", raw))
}
seen[value] = struct{}{}
}
if _, hasSinytra := seen["sinytra"]; hasSinytra && platform != "neoforge" {
return NewStateError(ManifestFile, ErrMalformed, "environment.compatible_platforms", "environment.compatible_platforms cannot include \"sinytra\" unless environment.modding_platform is \"neoforge\"")
}
return nil
}
// validateManifestPlatform remains as a legacy helper for the pre-Task-2 lock
// schema, which still validates a single platform field.
func validateManifestPlatform(value string) error {
platform := types.Platform(strings.TrimSpace(value))
if platform == "" {
return fmt.Errorf("environment.platform is required")
}
switch platform {
case types.PlatformFabric, types.PlatformNeoforge, types.PlatformForge, types.PlatformMCDR, types.PlatformNone:
return nil
default:
return fmt.Errorf("invalid environment.platform %q", value)
}
}
func validateManifestPackage(pkg ManifestPackage) error {
if strings.TrimSpace(pkg.ID) == "" {
return fmt.Errorf("id is required")
}
if strings.Contains(pkg.ID, "@") {
return fmt.Errorf("id must use platform/name format without version")
}
parts := strings.Split(pkg.ID, "/")
if len(parts) != 2 || strings.TrimSpace(parts[0]) == "" || strings.TrimSpace(parts[1]) == "" {
return fmt.Errorf("id must use platform/name format")
}
platform := types.Platform(parts[0])
if !platform.Valid() || platform == types.PlatformAny || platform == types.PlatformMinecraft || platform == types.PlatformUnknown {
return fmt.Errorf("invalid package platform %q", parts[0])
}
if strings.TrimSpace(pkg.Version) == "" {
return fmt.Errorf("version is required")
}
version := types.RawVersion(pkg.Version)
if version.IsInvalid() {
return fmt.Errorf("invalid version %q", pkg.Version)
}
if types.ParseSource(pkg.Source) == types.SourceUnknown {
return fmt.Errorf("invalid source %q", pkg.Source)
}
switch pkg.Role {
case RoleRequired, RoleTransitive, RoleIgnored:
case "":
return fmt.Errorf("role is required")
default:
return fmt.Errorf("invalid role %q; expected one of required, transitive, ignored", pkg.Role)
}
switch pkg.Side {
case SideServer, SideClient, SideBoth, SideUnknown:
default:
return fmt.Errorf("invalid side %q", pkg.Side)
}
return nil
}
func CompatiblePlatformOptions(primary string) []string {
switch strings.TrimSpace(primary) {
case "neoforge":
return []string{"fabric", "mcdr", "sinytra"}
case "fabric", "forge":
return []string{"mcdr"}
default:
return nil
}
}
func NormalizeManifestVersionIntent(version types.RawVersion) string {
trimmed := strings.TrimSpace(version.String())
switch trimmed {
case "", "any", "none", "unknown":
return types.VersionCompatible.String()
default:
return trimmed
}
}
func UpsertManifestRequiredIntent(manifest *Manifest, id types.PackageId, source string) *Manifest {
if manifest == nil {
defaults := ManifestDefaults()
manifest = &defaults
} else {
clone := *manifest
clone.Environment.CompatiblePlatforms = append([]string(nil), manifest.Environment.CompatiblePlatforms...)
clone.Environment.DeclaredCapabilities = append([]string(nil), manifest.Environment.DeclaredCapabilities...)
clone.Packages = append([]ManifestPackage(nil), manifest.Packages...)
clone.Bundles = append([]ManifestBundle(nil), manifest.Bundles...)
manifest = &clone
}
resolvedSource := strings.TrimSpace(source)
if types.ParseSource(resolvedSource) == types.SourceUnknown {
resolvedSource = "auto"
}
intentVersion := NormalizeManifestVersionIntent(id.Version)
for i := range manifest.Packages {
if manifest.Packages[i].ID != id.StringPlatformName() {
continue
}
manifest.Packages[i].Version = intentVersion
manifest.Packages[i].Source = resolvedSource
manifest.Packages[i].Role = RoleRequired
if manifest.Packages[i].Side == "" {
manifest.Packages[i].Side = SideUnknown
}
sort.Slice(manifest.Packages, func(i, j int) bool {
return manifest.Packages[i].ID < manifest.Packages[j].ID
})
return manifest
}
manifest.Packages = append(manifest.Packages, ManifestPackage{
ID: id.StringPlatformName(),
Version: intentVersion,
Source: resolvedSource,
Role: RoleRequired,
Side: SideUnknown,
})
sort.Slice(manifest.Packages, func(i, j int) bool {
return manifest.Packages[i].ID < manifest.Packages[j].ID
})
return manifest
}
func validateManifestBundle(bundle ManifestBundle) error {
if strings.TrimSpace(bundle.Name) == "" {
return fmt.Errorf("name is required")
}
if strings.TrimSpace(bundle.Path) == "" {
return fmt.Errorf("path is required")
}
if strings.TrimSpace(bundle.Source) == "" {
return fmt.Errorf("source is required")
}
switch bundle.Type {
case BundleTypeConfig, BundleTypeDatapack, BundleTypeResourcepack, BundleTypeKubeJS, BundleTypeCustom:
return nil
default:
return fmt.Errorf("invalid type %q", bundle.Type)
}
}
func ManifestPackagesFromClassified(classified []ClassifiedPackage) []ManifestPackage {
packages := make([]ManifestPackage, 0, len(classified))
for _, pkg := range classified {
id := strings.TrimSpace(pkg.ID)
if id == "" {
continue
}
version := strings.TrimSpace(pkg.Version)
if version == "" {
version = types.VersionCompatible.String()
}
source := strings.TrimSpace(pkg.Source)
if types.ParseSource(source) == types.SourceUnknown {
source = "auto"
}
role := pkg.Role
if role == "" {
role = RoleTransitive
}
side := pkg.Side
if side == "" {
side = SideUnknown
}
packages = append(packages, ManifestPackage{
ID: id,
Version: version,
Source: source,
Role: role,
Side: side,
Optional: pkg.Optional,
Pinned: pkg.Pinned,
})
}
sort.Slice(packages, func(i, j int) bool {
return packages[i].ID < packages[j].ID
})
return packages
}
func UpdateManifestRolesForAdd(manifest *Manifest, requested []types.PackageId, lock *Lock) *Manifest {
base := cloneManifestOrDefaults(manifest)
required := manifestPackagesByRole(base.Packages, RoleRequired)
ignored := manifestPackagesByRole(base.Packages, RoleIgnored)
for _, id := range requested {
resolvedID := resolveManifestPackageID(id, &base, lock)
if resolvedID == "" {
continue
}
if _, keepIgnored := ignored[resolvedID]; keepIgnored {
continue
}
pkg, ok := manifestPackageByID(base.Packages, resolvedID)
if !ok {
pkg = defaultManifestPackageForID(resolvedID)
}
pkg.ID = resolvedID
pkg.Role = RoleRequired
pkg.Version = requestedManifestVersion(id, pkg.Version)
pkg.Source = normalizedManifestSource(pkg.Source)
if pkg.Side == "" {
pkg.Side = SideUnknown
}
required[resolvedID] = pkg
}
base.Packages = rebuildManifestPackages(required, ignored, lock)
return &base
}
func UpdateManifestRolesForRemove(manifest *Manifest, removed []types.PackageId, lock *Lock) *Manifest {
base := cloneManifestOrDefaults(manifest)
required := manifestPackagesByRole(base.Packages, RoleRequired)
ignored := manifestPackagesByRole(base.Packages, RoleIgnored)
for _, id := range removed {
resolvedID := resolveManifestPackageID(id, &base, lock)
if resolvedID == "" {
continue
}
if _, keepIgnored := ignored[resolvedID]; keepIgnored {
continue
}
delete(required, resolvedID)
}
base.Packages = rebuildManifestPackages(required, ignored, lock)
return &base
}
func cloneManifestOrDefaults(manifest *Manifest) Manifest {
if manifest == nil {
return ManifestDefaults()
}
cloned := *manifest
cloned.Environment.CompatiblePlatforms = append([]string(nil), manifest.Environment.CompatiblePlatforms...)
cloned.Environment.DeclaredCapabilities = append([]string(nil), manifest.Environment.DeclaredCapabilities...)
cloned.Packages = append([]ManifestPackage(nil), manifest.Packages...)
cloned.Bundles = append([]ManifestBundle(nil), manifest.Bundles...)
normalizeManifest(&cloned)
return cloned
}
func manifestPackagesByRole(packages []ManifestPackage, role ManifestRole) map[string]ManifestPackage {
indexed := make(map[string]ManifestPackage)
for _, pkg := range packages {
if pkg.Role != role {
continue
}
indexed[pkg.ID] = pkg
}
return indexed
}
func manifestPackageByID(packages []ManifestPackage, id string) (ManifestPackage, bool) {
for _, pkg := range packages {
if pkg.ID == id {
return pkg, true
}
}
return ManifestPackage{}, false
}
func rebuildManifestPackages(required map[string]ManifestPackage, ignored map[string]ManifestPackage, lock *Lock) []ManifestPackage {
classified := make([]ClassifiedPackage, 0, len(required)+len(ignored))
requiredIDs := make(map[string]struct{}, len(required))
ignoredIDs := make(map[string]struct{}, len(ignored))
for id, pkg := range required {
requiredIDs[id] = struct{}{}
classified = append(classified, ClassifiedPackage{
ID: id,
Version: pkg.Version,
Source: normalizedManifestSource(pkg.Source),
Role: RoleRequired,
Side: normalizedManifestSide(pkg.Side),
Optional: pkg.Optional,
Pinned: pkg.Pinned,
})
}
for id, pkg := range ignored {
ignoredIDs[id] = struct{}{}
classified = append(classified, ClassifiedPackage{
ID: id,
Version: pkg.Version,
Source: normalizedManifestSource(pkg.Source),
Role: RoleIgnored,
Side: normalizedManifestSide(pkg.Side),
Optional: pkg.Optional,
Pinned: pkg.Pinned,
})
}
if lock != nil {
for _, locked := range lock.Packages {
if _, keepIgnored := ignoredIDs[locked.ID]; keepIgnored {
continue
}
if _, isRequired := requiredIDs[locked.ID]; isRequired {
continue
}
if !lockedPackageReachableFromRequired(locked, requiredIDs) {
continue
}
classified = append(classified, ClassifiedPackage{
ID: locked.ID,
Version: locked.Version,
Source: normalizedManifestSource(locked.Source),
Role: RoleTransitive,
Side: normalizedManifestSide(ManifestSide(locked.Side)),
Optional: locked.Optional,
})
}
}
return ManifestPackagesFromClassified(classified)
}
func lockedPackageReachableFromRequired(pkg LockedPackage, required map[string]struct{}) bool {
for _, step := range pkg.Provenance {
id := normalizeProvenanceStep(step)
if id == "" || id == "root" {
continue
}
if _, ok := required[id]; ok {
return true
}
}
requester := normalizeProvenanceStep(pkg.Requester)
if requester == "" || requester == "root" {
return false
}
_, ok := required[requester]
return ok
}
func normalizeProvenanceStep(step string) string {
trimmed := strings.TrimSpace(step)
if trimmed == "" || trimmed == "root" {
return trimmed
}
if prefix, _, ok := strings.Cut(trimmed, "@"); ok {
return prefix
}
return trimmed
}
func requestedManifestVersion(id types.PackageId, fallback string) string {
if id.Version == types.VersionAny {
if strings.TrimSpace(fallback) != "" {
return fallback
}
return types.VersionCompatible.String()
}
return id.Version.String()
}
func normalizedManifestSource(source string) string {
if types.ParseSource(source) == types.SourceUnknown {
return "auto"
}
if source == "" {
return "auto"
}
return source
}
func normalizedManifestSide(side ManifestSide) ManifestSide {
switch side {
case SideServer, SideClient, SideBoth, SideUnknown:
return side
default:
return SideUnknown
}
}
func defaultManifestPackageForID(id string) ManifestPackage {
return ManifestPackage{
ID: id,
Version: types.VersionCompatible.String(),
Source: "auto",
Role: RoleRequired,
Side: SideUnknown,
}
}
func resolveManifestPackageID(id types.PackageId, manifest *Manifest, lock *Lock) string {
if id.IsIdentityPackage() {
id.NormalizeIdentityPackage()
}
if id.Platform != types.PlatformAny && id.Platform != types.PlatformUnknown {
return id.StringPlatformName()
}
if manifest != nil {
candidate := resolveIDByName(id.Name, manifestPackageIDs(manifest.Packages))
if candidate != "" {
return candidate
}
}
if lock != nil {
ids := make([]string, 0, len(lock.Packages))
for _, pkg := range lock.Packages {
ids = append(ids, pkg.ID)
}
candidate := resolveIDByName(id.Name, ids)
if candidate != "" {
return candidate
}
}
return id.StringPlatformName()
}
func manifestPackageIDs(packages []ManifestPackage) []string {
ids := make([]string, 0, len(packages))
for _, pkg := range packages {
ids = append(ids, pkg.ID)
}
return ids
}
func resolveIDByName(name types.ProjectName, ids []string) string {
var match string
for _, id := range ids {
parts := strings.Split(id, "/")
if len(parts) != 2 || strings.TrimSpace(parts[1]) == "" {
continue
}
if parts[1] != name.String() {
continue
}
if match != "" && match != id {
return ""
}
match = id
}
return match
}
func (m Manifest) Marshal() ([]byte, error) {
return json.MarshalIndent(m, "", " ")
}
func (m *Manifest) Unmarshal(data []byte) error {
if err := json.Unmarshal(data, m); err != nil {
return err
}
normalizeManifest(m)
return nil
}
func normalizeManifest(m *Manifest) {
if m == nil {
return
}
if m.Environment.DeclaredCapabilities == nil {
m.Environment.DeclaredCapabilities = []string{}
}
if m.Environment.CompatiblePlatforms == nil {
m.Environment.CompatiblePlatforms = []string{}
}
if m.Packages == nil {
m.Packages = []ManifestPackage{}
}
if m.Bundles == nil {
m.Bundles = []ManifestBundle{}
}
}
package state
import (
"path"
"strings"
)
type ArtifactClass string
const (
ClassMod ArtifactClass = "mod"
ClassPlugin ArtifactClass = "plugin"
ClassConfig ArtifactClass = "config"
ClassDatapack ArtifactClass = "datapack"
ClassResourcePack ArtifactClass = "resourcepack"
ClassKubeJS ArtifactClass = "kubejs"
ClassEmbedded ArtifactClass = "embedded"
ClassUnmanaged ArtifactClass = "unmanaged"
)
type ManagedRoot struct {
Path string
Class ArtifactClass
}
type ManagedScope struct {
Roots []ManagedRoot
UnmanagedPatterns []string
}
func DefaultManagedRoots() []ManagedRoot {
return []ManagedRoot{
{Path: "mods", Class: ClassMod},
{Path: "plugins", Class: ClassPlugin},
{Path: "config", Class: ClassConfig},
}
}
func NewManagedScope(roots []string, unmanagedPatterns []string) ManagedScope {
if len(roots) == 0 {
defaults := DefaultManagedRoots()
return ManagedScope{Roots: defaults, UnmanagedPatterns: normalizePatterns(unmanagedPatterns)}
}
managedRoots := make([]ManagedRoot, 0, len(roots))
for _, root := range roots {
normalized := normalizeRelativePath(root)
if normalized == "." || normalized == "" {
continue
}
managedRoots = append(managedRoots, ManagedRoot{
Path: normalized,
Class: classifyManagedRoot(normalized),
})
}
return ManagedScope{Roots: managedRoots, UnmanagedPatterns: normalizePatterns(unmanagedPatterns)}
}
func IsManaged(scope ManagedScope, relPath string) bool {
normalized := normalizeRelativePath(relPath)
if normalized == "." || normalized == "" {
return false
}
if matchesAnyPattern(scope.UnmanagedPatterns, normalized) {
return false
}
_, ok := managedRootForPath(scope, normalized)
return ok
}
func ClassifyPath(scope ManagedScope, relPath string) ArtifactClass {
normalized := normalizeRelativePath(relPath)
if normalized == "." || normalized == "" {
return ClassUnmanaged
}
if !IsManaged(scope, normalized) {
return ClassUnmanaged
}
root, ok := managedRootForPath(scope, normalized)
if !ok {
return ClassUnmanaged
}
return root.Class
}
func normalizePatterns(patterns []string) []string {
normalized := make([]string, 0, len(patterns))
for _, pattern := range patterns {
value := normalizeRelativePath(pattern)
if value == "." || value == "" {
continue
}
normalized = append(normalized, value)
}
return normalized
}
func managedRootForPath(scope ManagedScope, relPath string) (ManagedRoot, bool) {
for _, root := range scope.Roots {
if pathWithinRoot(relPath, root.Path) {
return root, true
}
}
return ManagedRoot{}, false
}
func classifyManagedRoot(root string) ArtifactClass {
switch {
case pathWithinRoot(root, "mods"):
return ClassMod
case pathWithinRoot(root, "plugins"):
return ClassPlugin
case pathWithinRoot(root, "config"):
return ClassConfig
case strings.Contains(root, "datapacks"):
return ClassDatapack
case pathWithinRoot(root, "resourcepacks"):
return ClassResourcePack
case pathWithinRoot(root, "kubejs"):
return ClassKubeJS
default:
return ClassConfig
}
}
func matchesAnyPattern(patterns []string, relPath string) bool {
for _, pattern := range patterns {
if globMatch(pattern, relPath) {
return true
}
}
return false
}
func globMatch(pattern string, relPath string) bool {
pattern = normalizeRelativePath(pattern)
relPath = normalizeRelativePath(relPath)
if pattern == relPath {
return true
}
if prefix, ok := strings.CutSuffix(pattern, "/**"); ok {
return pathWithinRoot(relPath, prefix)
}
matched, err := path.Match(pattern, relPath)
return err == nil && matched
}
func pathWithinRoot(relPath string, root string) bool {
relPath = normalizeRelativePath(relPath)
root = normalizeRelativePath(root)
if relPath == root {
return true
}
return strings.HasPrefix(relPath, root+"/")
}
func normalizeRelativePath(value string) string {
trimmed := strings.TrimSpace(strings.ReplaceAll(value, "\\", "/"))
if trimmed == "" {
return ""
}
cleaned := path.Clean(trimmed)
if cleaned == "." {
return cleaned
}
return strings.TrimPrefix(cleaned, "./")
}
package state
import (
"context"
"errors"
"os"
"path/filepath"
)
type ProjectStateService struct {
workDir string
config *Config
manifest *Manifest
lock *Lock
loaded bool
}
func NewProjectStateService(workDir string) *ProjectStateService {
return &ProjectStateService{workDir: workDir}
}
func (s *ProjectStateService) Load(ctx context.Context) error {
if s.loaded {
return nil
}
if err := ctx.Err(); err != nil {
return err
}
if s.workDir == "" {
return ioStateError("", "workDir", "workDir is required", nil)
}
cfg, err := loadConfig(ctx, s.workDir)
if err != nil {
return err
}
manifest, err := loadManifest(ctx, s.workDir)
if err != nil {
return err
}
lock, err := loadLock(ctx, s.workDir)
if err != nil {
return err
}
s.config = cfg
s.manifest = manifest
s.lock = lock
s.loaded = true
return nil
}
func (s *ProjectStateService) Reload(ctx context.Context) error {
s.Invalidate()
return s.Load(ctx)
}
func (s *ProjectStateService) Invalidate() {
s.config = nil
s.manifest = nil
s.lock = nil
s.loaded = false
}
func (s *ProjectStateService) Config() *Config { return s.config }
func (s *ProjectStateService) Manifest() *Manifest { return s.manifest }
func (s *ProjectStateService) Lock() *Lock { return s.lock }
func (s *ProjectStateService) Save(ctx context.Context, cfg *Config, m *Manifest, l *Lock) error {
if err := ctx.Err(); err != nil {
return err
}
if s.workDir == "" {
return ioStateError("", "workDir", "workDir is required", nil)
}
if cfg != nil {
data, err := SerializeConfig(cfg)
if err != nil {
return err
}
if err := writeStateFile(ctx, filepath.Join(s.workDir, string(ConfigFile)), ConfigFile, data); err != nil {
return err
}
}
if m != nil {
data, err := SerializeManifest(m)
if err != nil {
return err
}
if err := writeStateFile(ctx, filepath.Join(s.workDir, string(ManifestFile)), ManifestFile, data); err != nil {
return err
}
}
if l != nil {
data, err := SerializeLock(l)
if err != nil {
return err
}
if err := writeStateFile(ctx, filepath.Join(s.workDir, string(LockFile)), LockFile, data); err != nil {
return err
}
}
s.config = cfg
s.manifest = m
s.lock = l
s.loaded = true
return nil
}
func loadConfig(ctx context.Context, workDir string) (*Config, error) {
data, err := readStateFile(ctx, filepath.Join(workDir, string(ConfigFile)), ConfigFile)
if err != nil || data == nil {
return nil, err
}
config, err := ParseConfig(data)
if err != nil {
return nil, malformedStateError(ConfigFile, "document", err)
}
return config, nil
}
func loadManifest(ctx context.Context, workDir string) (*Manifest, error) {
data, err := readStateFile(ctx, filepath.Join(workDir, string(ManifestFile)), ManifestFile)
if err != nil || data == nil {
return nil, err
}
manifest, err := ParseManifest(data)
if err != nil {
return nil, malformedStateError(ManifestFile, "document", err)
}
return manifest, nil
}
func loadLock(ctx context.Context, workDir string) (*Lock, error) {
data, err := readStateFile(ctx, filepath.Join(workDir, string(LockFile)), LockFile)
if err != nil || data == nil {
return nil, err
}
lock, err := ParseLock(data)
if err != nil {
return nil, malformedStateError(LockFile, "document", err)
}
return lock, nil
}
func readStateFile(ctx context.Context, path string, file StateFile) ([]byte, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
data, err := os.ReadFile(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, nil
}
return nil, ioStateError(file, "document", "read failed", err)
}
return data, nil
}
func writeStateFile(ctx context.Context, path string, file StateFile, data []byte) error {
if err := ctx.Err(); err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return ioStateError(file, "document", "mkdir failed", err)
}
if err := AtomicWrite(path, data, 0o644); err != nil {
return ioStateError(file, "document", "write failed", err)
}
return nil
}
package state
const SupportedVersion = "v1"
func ValidateVersion(version string) error {
if version == "" {
return NewStateError("", ErrMalformed, "version", "version is required")
}
if version != SupportedVersion {
return NewStateError("", ErrVersionUnsupported, "version", "unsupported version \""+version+"\"; supported version is \""+SupportedVersion+"\"")
}
return nil
}
func IsSupported(version string) bool {
return version == SupportedVersion
}
// Package syntax defines the syntax for specifying packages and platforms.
//
// A package can either be specified by a string in the format of
// "platform/name@version". Only the name is required, both platform and version
// can be omitted.
//
// Valid Examples:
// - carpet
// - mcdr/prime-backup
// - fabric/jade@1.0.0
// - fabric@12.0
// - minecraft@1.19 (recommended)
// - minecraft/minecraft@1.16.5 (= minecraft@1.16.5)
// - 1.8.9 (= minecraft@1.8.9)
package syntax
import (
"errors"
"strings"
"github.com/mclucy/lucy/types"
)
func ToProjectName(s string) types.ProjectName {
return types.ProjectName(sanitize(s))
}
// sanitize tolerates some common interchangeability between characters. This
// includes underscores, chinese full stops, and backslashes. It also converts
// uppercase characters to lowercase.
func sanitize(s string) string {
var b strings.Builder
b.Grow(len(s))
for _, char := range s {
switch {
case char == '_':
b.WriteByte('-')
case char == '\\':
b.WriteByte('/')
case char == '。':
b.WriteByte('.')
case 'A' <= char && char <= 'Z':
b.WriteRune(char + 'a' - 'A')
default:
b.WriteRune(char)
}
}
return b.String()
}
var (
ESyntax = errors.New("invalid syntax")
EPlatform = errors.New("invalid platform")
)
// Parse is exported to parse a string into a PackageId struct.
// Returns the parsed PackageId and an error if parsing fails.
func Parse(s string) (id types.PackageId, err error) {
s = sanitize(s)
id = types.PackageId{}
id.Platform, id.Name, id.Version, err = parseOperatorAt(s)
if err != nil {
return types.PackageId{}, err
}
id.NormalizeIdentityPackage()
return id, nil
}
// parseOperatorAt is called first since '@' operator always occur after '/' (equivalent
// to a lower priority).
func parseOperatorAt(s string) (
pl types.Platform,
n types.ProjectName,
v types.RawVersion,
err error,
) {
split := strings.Split(s, "@")
pl, n, err = parseOperatorSlash(split[0])
if err != nil {
return "", "", "", ESyntax
}
if len(split) == 1 {
v = types.VersionAny
} else if len(split) == 2 {
v = types.RawVersion(split[1])
if v == types.VersionNone {
return "", "", "", ESyntax
}
} else {
return "", "", "", ESyntax
}
return
}
func parseOperatorSlash(s string) (
pl types.Platform,
n types.ProjectName,
err error,
) {
split := strings.Split(s, "/")
if len(split) == 1 {
pl = types.PlatformAny
n = types.ProjectName(split[0])
if types.Platform(n).Valid() {
// Remember, all platforms are also valid packages under themselves.
// This literal is for users to specify the platform itself.
// This means the user specified a platform name directly.
pl = types.Platform(n)
n = types.ProjectName(pl)
}
} else if len(split) == 2 {
pl = types.Platform(split[0])
if !pl.Valid() {
return "", "", EPlatform
}
n = types.ProjectName(split[1])
} else {
return "", "", ESyntax
}
return
}
package main
import (
"flag"
"fmt"
"io"
"os"
"github.com/mclucy/lucy/internal/cipher"
)
func main() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
fmt.Fprintf(os.Stderr, "\nOptions:\n")
fmt.Fprintf(os.Stderr, " -keygen Generate new random key\n")
fmt.Fprintf(os.Stderr, " -encrypt KEY Encrypt KEY with Key\n")
flag.PrintDefaults()
}
keygen := flag.Bool("keygen", false, "generate new key")
encrypt := flag.String("encrypt", "", "encrypt API key")
flag.Parse()
switch {
case *keygen:
var data [32]byte
f, err := os.Open("/dev/urandom")
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
io.ReadFull(f, data[:])
f.Close()
fmt.Printf("cipher_key=%x\n", data)
os.Exit(0)
case *encrypt != "":
ciphertext, err := cipher.Encrypt(*encrypt)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
fmt.Printf("cipher_ciphertext=%s\n", ciphertext)
default:
flag.Usage()
}
}
package tools
import (
"reflect"
)
// Exists checks if an element exists in a slice. It returns true if the element
// is found, and false otherwise.
func Exists[T comparable](arr []T, elem T) bool {
for _, v := range arr {
if v == elem {
return true
}
}
return false
}
func Count[T comparable](arr []T, elem T) int {
count := 0
for _, v := range arr {
if v == elem {
count++
}
}
return count
}
func ForEach[T any](arr []T, fn func(T)) {
for _, v := range arr {
fn(v)
}
}
func ForEachOnMatrix[T any](mat [][]T, fn func(T)) {
for _, row := range mat {
for _, v := range row {
fn(v)
}
}
}
func ForEachRecursive[T any](arr []any, fn func(T)) {
if arr == nil {
return
}
eType := reflect.TypeOf(fn).In(0)
for _, v := range arr {
if v == nil {
continue
}
val := reflect.ValueOf(v)
// Recursively handle slices
if val.Kind() == reflect.Slice {
sliceAny := make([]any, val.Len())
for i := 0; i < val.Len(); i++ {
sliceAny[i] = val.Index(i).Interface()
}
ForEachRecursive(sliceAny, fn)
continue
}
// Call the function if types match
if val.Type().AssignableTo(eType) {
fn(v.(T))
}
}
}
func IsEmptyVector[T any](arr []T) bool {
if len(arr) == 0 {
return true
}
for _, e := range arr {
if !isEmptyVectorValue(reflect.ValueOf(e)) {
return false
}
}
return true
}
func isEmptyVectorValue(v reflect.Value) bool {
if !v.IsValid() {
return true
}
if v.Kind() != reflect.Slice {
return false
}
if v.Len() == 0 {
return true
}
for i := 0; i < v.Len(); i++ {
if !isEmptyVectorValue(v.Index(i)) {
return false
}
}
return true
}
package tools
import (
"io"
"net/http"
"os"
"path/filepath"
"slices"
"strings"
"sync"
"time"
"github.com/charmbracelet/glamour"
)
const CRLF = "\r\n"
// TernaryFunc gives a if expr == true, b if expr == false. For a simple
// bool expression, use Ternary instead.
func TernaryFunc[T any](expr func() bool, a T, b T) T {
if expr() {
return a
}
return b
}
// Ternary returns a if v == true, b if v == false. For a function parameter, use
// TernaryFunc instead.
//
// Do not use this in a loop or a performance-critical code path, as it may cause
// unnecessary evaluations of a and b.
func Ternary[T any](v bool, a T, b T) T {
if v {
return a
}
return b
}
func TernaryLazy[T any](v bool, a func() T, b func() T) T {
if v {
return a()
}
return b()
}
// Memoize is only used for functions that do not take any arguments and return
// a value (typically a struct) that can be treated as a constant.
func Memoize[T any](f func() T) func() T {
var res T
var once sync.Once
return func() T {
once.Do(
func() {
res = f()
},
)
return res
}
}
func MemoizeE[T any](f func() (T, error)) func() (T, error) {
var res T
var err error
var once sync.Once
return func() (T, error) {
once.Do(
func() {
res, err = f()
},
)
return res, err
}
}
// Insert inserts a value into a slice at a slice[pos]. If the pos is out of
// bounds, the slice remains unchanged.
func Insert[T any](slice []T, pos int, value ...T) []T {
if pos < 0 || pos > len(slice) {
return slice
}
return append(slice[:pos], append(value, slice[pos:]...)...)
}
// CloseReader closes a reader and runs failAction() if error occurs. Call this
// with a defer statement.
func CloseReader(reader io.ReadCloser, failAction func(error)) {
err := reader.Close()
if err != nil {
failAction(err)
}
}
const (
networkTestTimeout = 5 // seconds
networkTestRetries = 3
)
// factoryNetworkTest is a simple the network connection test. You can use this before
// any operation that strictly requires a network connection.
//
// A nil value means the connection is successful.
func factoryNetworkTest(url string, retry int, timeout int) func() (err error) {
return func() (err error) {
retry := networkTestRetries
client := http.Client{Timeout: networkTestTimeout * time.Second}
Retry:
_, err = client.Get(url)
if err != nil {
retry--
if retry > 0 {
goto Retry
}
return err
}
return nil
}
}
var GoogleTest = factoryNetworkTest(
"https://www.google.com",
networkTestRetries,
networkTestRetries,
)
var GithubTest = factoryNetworkTest(
"https://github.com",
networkTestRetries,
networkTestRetries,
)
var RegularTest = factoryNetworkTest(
"https://www.example.com",
networkTestRetries,
networkTestRetries,
)
func MarkdownToAnsi(md string, maxWidth int) string {
trimmed := strings.TrimSpace(md)
if trimmed == "" {
return ""
}
if maxWidth <= 0 {
maxWidth = TermWidth()
}
options := []glamour.TermRendererOption{
glamour.WithWordWrap(maxWidth),
}
if StylesEnabled() {
options = append(options, glamour.WithAutoStyle())
}
renderer, err := glamour.NewTermRenderer(options...)
defer CloseReader(renderer, func(err error) {})
if err != nil {
return trimmed
}
rendered, err := renderer.Render(trimmed)
if err != nil {
return trimmed
}
return strings.TrimRight(rendered, "\n")
}
// Decorate applies a series of decorators to a function. This is used to
// prevent nested function calls for better readability.
func Decorate[T interface{}](f T, decorators ...func(T) T) T {
for _, decorator := range decorators {
f = decorator(f)
}
return f
}
// UnderCd checks if the path is under the current working directory (non-recursive).
func UnderCd(path string) bool {
abs, err := filepath.Abs(path)
if err != nil {
return false
}
cd, err := os.Getwd()
if err != nil {
return false
}
parent := filepath.Dir(abs)
return parent == cd
}
// KeyValue works together with SortAndExtract to sort a slice of Item
// with their corresponding Index.
type KeyValue[T, Ti any] struct {
Item T
Index Ti
}
func SortAndExtract[T, Ti any](
arr []KeyValue[T, Ti],
cmp func(a, b KeyValue[T, Ti]) int,
) (res []T) {
slices.SortFunc(arr, cmp)
for _, item := range arr {
res = append(res, item.Item)
}
return res
}
package tools
// JaroWinklerSimilarity
//
// Returns 0.0-1.0, with 1.0 being a perfect match and 0.0 being no match.
func JaroWinklerSimilarity(s1, s2 string) float64 {
// Special case for empty strings
if len(s1) == 0 && len(s2) == 0 {
return 1.0
}
if len(s1) == 0 || len(s2) == 0 {
return 0.0
}
// Match characters
matchDistance := max(len(s1), len(s2))/2 - 1
if matchDistance < 0 {
matchDistance = 0
}
s1Matches := make([]bool, len(s1))
s2Matches := make([]bool, len(s2))
var matchingCharacters float64 = 0
for i := 0; i < len(s1); i++ {
start := max(0, i-matchDistance)
end := min(i+matchDistance+1, len(s2))
for j := start; j < end; j++ {
if !s2Matches[j] && s1[i] == s2[j] {
s1Matches[i] = true
s2Matches[j] = true
matchingCharacters++
break
}
}
}
// Special case for 0 matches
if matchingCharacters == 0 {
return 0.0
}
var transpositions float64 = 0
var point float64 = 0
for i := 0; i < len(s1); i++ {
if s1Matches[i] {
for j := int(point); j < len(s2); j++ {
if s2Matches[j] {
point = float64(j) + 1
break
}
}
if s1[i] != s2[int(point)-1] {
transpositions++
}
}
}
transpositions /= 2
// Jaro distance
jaroSimilarity := (matchingCharacters/float64(len(s1)) +
matchingCharacters/float64(len(s2)) +
(matchingCharacters-transpositions)/matchingCharacters) / 3.0
// Winkler correction
// Calculate the length of common prefix
const commonPrefixLength = 4
var prefixLength float64 = 0
for i := 0; i < min(len(s1), len(s2), commonPrefixLength); i++ {
if s1[i] == s2[i] {
prefixLength++
} else {
break
}
}
// p is the scaling factor for the Jaro-Winkler distance
// p is usually set to 0.1
p := 0.1
return jaroSimilarity + prefixLength*p*(1-jaroSimilarity)
}
func NormalizedLevenshteinDistance(s1, s2 string) float64 {
maxLen := max(len(s1), len(s2))
if maxLen == 0 {
return 0
}
distance := LevenshteinDistance(s1, s2)
return float64(distance) / float64(maxLen)
}
func LevenshteinDistance(s1, s2 string) int {
// m, n being the lengths of the two strings
m := len(s1)
n := len(s2)
// Create distance matrix
d := make([][]int, m+1)
for i := range d {
d[i] = make([]int, n+1)
}
// Matrix initialization
for i := 0; i <= m; i++ {
d[i][0] = i
}
for j := 0; j <= n; j++ {
d[0][j] = j
}
// Compute Levenshtein distance
for j := 1; j <= n; j++ {
for i := 1; i <= m; i++ {
cost := 1
if s1[i-1] == s2[j-1] {
cost = 0
}
d[i][j] = min(
d[i-1][j]+1, // del
d[i][j-1]+1, // insert
d[i-1][j-1]+cost, // substitute
)
}
}
return d[m][n]
}
func min(nums ...int) int {
result := nums[0]
for _, num := range nums[1:] {
if num < result {
result = num
}
}
return result
}
func max(nums ...int) int {
result := nums[0]
for _, num := range nums[1:] {
if num > result {
result = num
}
}
return result
}
package tools
import (
"encoding/json"
"net/http"
)
// DumpHeader is only used for debugging purposes. It dumps the header of a http.Response.
func DumpHeader(resp http.Response) {
header, _ := json.MarshalIndent(resp.Header, "", " ")
println(string(header))
}
package tools
import (
"encoding/json"
"fmt"
"strings"
"time"
)
// PrintAsJson is usually used for debugging purposes
func PrintAsJson(v interface{}) {
data, err := json.MarshalIndent(v, "", " ")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(string(data))
}
func Capitalize(v any) string {
s, ok := v.(string)
if !ok {
s = fmt.Sprintf("%v", v)
}
if len(s) == 0 {
return ""
}
return strings.ToUpper(s[:1]) + s[1:]
}
func FormatBytesBinary(bytes int64) string {
const (
kib = 1024
mib = kib * 1024
gib = mib * 1024
)
switch {
case bytes >= gib:
return fmt.Sprintf("%.1f GiB", float64(bytes)/float64(gib))
case bytes >= mib:
return fmt.Sprintf("%.1f MiB", float64(bytes)/float64(mib))
case bytes >= kib:
return fmt.Sprintf("%.1f KiB", float64(bytes)/float64(kib))
default:
return fmt.Sprintf("%d B", bytes)
}
}
func FormatBytesDecimal(bytes int64) string {
const (
kb = 1000
mb = kb * 1000
gb = mb * 1000
)
switch {
case bytes >= gb:
return fmt.Sprintf("%.1f GB", float64(bytes)/float64(gb))
case bytes >= mb:
return fmt.Sprintf("%.1f MB", float64(bytes)/float64(mb))
case bytes >= kb:
return fmt.Sprintf("%.1f KB", float64(bytes)/float64(kb))
default:
return fmt.Sprintf("%d B", bytes)
}
}
func FormatDuration(t time.Time) string {
remaining := time.Until(t)
if remaining <= 0 {
return "expired"
}
switch {
case remaining >= 24*time.Hour:
days := int(remaining.Hours() / 24)
return fmt.Sprintf("expires in %dd", days)
case remaining >= time.Hour:
return fmt.Sprintf("expires in %dh", int(remaining.Hours()))
default:
return fmt.Sprintf("expires in %dm", int(remaining.Minutes()))
}
}
package tools
import (
"bufio"
"io"
"os"
)
func MoveFile(src *os.File, dest string) (err error) {
err = os.Rename(src.Name(), dest)
return
}
func CopyFile(src *os.File, dest string, mode os.FileMode) (file *os.File, err error) {
destFile, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
if err != nil {
return nil, err
}
if _, err = io.Copy(destFile, src); err != nil {
destFile.Close()
return nil, err
}
return destFile, nil
}
func MoveReaderToLine(r io.Reader, line string) error {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
if scanner.Text() == line {
return nil
}
}
return scanner.Err()
}
package tools
import (
"bufio"
"bytes"
"fmt"
"image/color"
"io"
"os"
"regexp"
"strconv"
"time"
"github.com/muesli/termenv"
"golang.org/x/term"
)
func osc4Query(index uint8) color.Color {
if index > 15 {
return nil
}
profile := termenv.ColorProfile()
if profile == termenv.Ascii {
return nil
}
tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
if err != nil {
return nil
}
defer tty.Close()
fd := int(tty.Fd())
if !term.IsTerminal(fd) {
return nil
}
// Raw mode
oldState, err := term.MakeRaw(fd)
if err != nil {
return nil
}
defer func() { _ = term.Restore(fd, oldState) }()
// OSC 4 query. Prefer ST terminator (ESC \).
// (Many terminals also accept BEL; ST is the official string terminator.)
query := fmt.Sprintf("\x1b]4;%d;?\x1b\\", index)
// Write query
if _, err := tty.Write([]byte(query)); err != nil {
return nil
}
const timeout = 100 * time.Millisecond
deadline := time.Now().Add(timeout)
_ = tty.SetReadDeadline(deadline)
resp := readResponseWithTimeout(bufio.NewReader(tty), timeout, tty)
if resp == nil {
return nil
}
return parseOSC4Response(index, resp)
}
func readResponseWithTimeout(
r *bufio.Reader,
timeout time.Duration,
closer io.Closer,
) []byte {
resultCh := make(chan []byte, 1)
go func() {
var buf bytes.Buffer
for {
b, err := r.ReadByte()
if err != nil {
resultCh <- nil
return
}
buf.WriteByte(b)
data := buf.Bytes()
n := len(data)
if b == '\a' || (n >= 2 && data[n-2] == 0x1b && data[n-1] == '\\') {
out := append([]byte(nil), data...)
resultCh <- out
return
}
if buf.Len() > 4096 {
resultCh <- nil
return
}
}
}()
timer := time.NewTimer(timeout)
defer timer.Stop()
select {
case result := <-resultCh:
return result
case <-timer.C:
if closer != nil {
_ = closer.Close()
}
return nil
}
}
func parseOSC4Response(index uint8, data []byte) color.Color {
// Example: ESC ] 4 ; 1 ; rgb:ffff/0000/0000 ESC \
// Some terminals may return "#" hex; handle only rgb:.... here for clarity.
re := regexp.MustCompile(`\x1b\]4;` + strconv.Itoa(int(index)) + `;rgb:([0-9a-fA-F]{1,4})/([0-9a-fA-F]{1,4})/([0-9a-fA-F]{1,4})`)
m := re.FindSubmatch(data)
if m == nil {
prefix := []byte("\x1b]4;" + strconv.Itoa(int(index)) + ";")
if bytes.Contains(data, prefix) {
return nil
}
return nil
}
r16, _ := strconv.ParseUint(string(m[1]), 16, 16)
g16, _ := strconv.ParseUint(string(m[2]), 16, 16)
b16, _ := strconv.ParseUint(string(m[3]), 16, 16)
scale := func(v uint64) uint8 { return uint8((v * 255) / 65535) }
return color.RGBA{
R: scale(r16),
G: scale(g16),
B: scale(b16),
A: 255,
}
}
package tools
import (
"fmt"
"image/color"
"os"
"sync"
"charm.land/lipgloss/v2"
"github.com/charmbracelet/x/ansi"
"golang.org/x/term"
)
var IsTerminal = term.IsTerminal(int(os.Stdout.Fd()))
var (
Bold func(any) string
Dim func(any) string
Italic func(any) string
Underline func(any) string
Red func(any) string
Green func(any) string
Yellow func(any) string
Blue func(any) string
Magenta func(any) string
Cyan func(any) string
)
var (
ValidUserColors bool
UserColors = make(map[ansi.BasicColor]color.Color)
)
var stylesEnabled = true
var ensureTermColorsOnce sync.Once
// EnsureTermColors lazily initializes UserColors and ValidUserColors
// on first call, using sync.Once for thread safety.
func EnsureTermColors() {
ensureTermColorsOnce.Do(getTermProfileColors)
}
func init() {
updateStyles()
}
func updateStyles() {
if !stylesEnabled {
noStyle := func(v any) string {
switch v := v.(type) {
case rune:
return string(v)
default:
return fmt.Sprintf("%v", v)
}
}
Bold, Dim, Italic, Underline, Red, Green, Yellow, Blue, Magenta, Cyan = noStyle, noStyle, noStyle, noStyle, noStyle, noStyle, noStyle, noStyle, noStyle, noStyle
return
}
Bold = lsStyle(lipgloss.NewStyle().Bold(true))
Dim = lsStyle(lipgloss.NewStyle().Faint(true))
Italic = lsStyle(lipgloss.NewStyle().Italic(true))
Underline = lsStyle(lipgloss.NewStyle().Underline(true))
Red = lsStyle(lipgloss.NewStyle().Foreground(lipgloss.Red))
Green = lsStyle(lipgloss.NewStyle().Foreground(lipgloss.Green))
Yellow = lsStyle(lipgloss.NewStyle().Foreground(lipgloss.Yellow))
Blue = lsStyle(lipgloss.NewStyle().Foreground(lipgloss.Blue))
Magenta = lsStyle(lipgloss.NewStyle().Foreground(lipgloss.Magenta))
Cyan = lsStyle(lipgloss.NewStyle().Foreground(lipgloss.Cyan))
}
// lsStyle wraps a lipgloss.Style into a func(any) string, matching the
// existing tools.Bold / tools.Dim / ... signature.
func lsStyle(s lipgloss.Style) func(any) string {
return func(v any) string {
switch v := v.(type) {
case rune:
return s.Render(string(v))
default:
return s.Render(fmt.Sprintf("%v", v))
}
}
}
func TurnOffStyles() {
stylesEnabled = false
updateStyles()
}
func StylesEnabled() bool {
return stylesEnabled
}
func TermWidth() int {
width, _, _ := term.GetSize(0)
if width <= 0 {
return 80
}
return width
}
func TermHeight() int {
_, height, _ := term.GetSize(0)
return height
}
func getTermProfileColors() {
for i := ansi.BasicColor(0); i < 16; i++ {
c := osc4Query(uint8(i))
if c == nil {
return
}
UserColors[i] = c
}
ValidUserColors = true
}
package tools
import (
"encoding/json"
)
// SingleOrSlice represents a value that can be encoded as either:
// - a single element: `T`
// - or an array: `[]T`
//
// After unmarshalling, both forms are normalized into a slice shape.
// This type is intended for input-schema compatibility at decoding boundaries.
// Business logic should typically consume plain []T.
type SingleOrSlice[T any] []T
func (s *SingleOrSlice[T]) UnmarshalJSON(data []byte) error {
// Fast-path by JSON shape to avoid ambiguity for permissive T types.
for _, b := range data {
switch b {
case ' ', '\t', '\n', '\r':
continue
case '[':
var multiple []T
if err := json.Unmarshal(data, &multiple); err != nil {
return err
}
*s = multiple
return nil
default:
var single T
if err := json.Unmarshal(data, &single); err != nil {
return err
}
*s = []T{single}
return nil
}
}
// Empty JSON input is treated as an empty slice.
*s = nil
return nil
}
// Package progress provides a terminal progress bar backed by the charm stack
// (bubbletea + bubbles/progress + lipgloss).
//
// Unlike the parent tui package which is a one-shot static renderer, this
// package uses bubbletea for live, interactive progress display.
//
// # Lifecycle Invariants
//
// The progress runtime does not call os.Exit or terminate the process.
// The caller controls process lifecycle. On interrupt (Ctrl+C), the runtime
// sets an internal stopped flag and returns control to the caller.
//
// Close() and Complete() are idempotent and mark entries as completed.
// The renderer stops only when all registered entries have completed.
// After all-complete shutdown, registering a new tracker resets the stopped
// flag and restarts the renderer.
//
// Usage:
//
// t := progress.NewTracker("Downloading")
// go func() {
// defer t.Close()
// resp, _ := http.Get(url)
// reader := t.ProxyReader(resp.Body, resp.ContentLength)
// io.Copy(dst, reader)
// }()
package progress
import (
"context"
"io"
)
// Tracker is a thread-safe progress bar controller.
//
// A Tracker is created with [NewTracker] and automatically starts displaying.
// External goroutines update progress via [Tracker.SetPercent],
// [Tracker.IncrPercent], and [Tracker.SetMessage].
// Call [Tracker.Close] to mark the tracker as completed.
type Tracker struct {
id entryID
logCapacity int
}
// NewTracker creates a [Tracker] with the given title and starts displaying it.
// Log capacity defaults to 5 lines.
func NewTracker(title string) *Tracker {
return newTracker(title, 5)
}
// NewTrackerWithLogging creates a [Tracker] with custom log line capacity.
func NewTrackerWithLogging(title string, logLimit int) *Tracker {
return newTracker(title, logLimit)
}
func newTracker(title string, logCapacity int) *Tracker {
id := globalRuntime.registerEntry(title, logCapacity)
return &Tracker{id: id, logCapacity: logCapacity}
}
// SetPercent sets the current progress to p (clamped to [0, 1]).
func (t *Tracker) SetPercent(p float64) {
globalRuntime.send(t.id, setPercentMsg(clamp01(p)))
}
// IncrPercent adds delta to the current progress.
func (t *Tracker) IncrPercent(delta float64) {
globalRuntime.send(t.id, incrPercentMsg(delta))
}
// SetMessage updates the status text shown alongside the bar.
func (t *Tracker) SetMessage(msg string) {
globalRuntime.send(t.id, setMessageMsg(msg))
}
// SetTitle updates the title shown at the top of the progress bar.
func (t *Tracker) SetTitle(title string) {
globalRuntime.send(t.id, setTitleMsg(title))
}
// Close marks this tracker as completed. The renderer stops only when all
// registered trackers are completed. Close is idempotent.
func (t *Tracker) Close() {
globalRuntime.send(t.id, closeMsg{})
}
// Complete marks this tracker as completed and sets the progress to 100%
// with a completion message. Like Close, the renderer stops only when all
// registered trackers are completed.
func (t *Tracker) Complete(msg string) {
globalRuntime.send(t.id, completeMsg(msg))
}
func (t *Tracker) CacheHit() {
t.Complete("Cache hit")
}
// ProxyReader wraps r so that every Read call updates this Tracker.
// total is the expected total byte count (e.g. from Content-Length).
// If total <= 0 the bar will not be updated (indeterminate).
func (t *Tracker) ProxyReader(r io.Reader, total int64) io.Reader {
return &proxyReader{Reader: r, tracker: t, total: total}
}
// LogWriter returns an io.Writer that ingests streaming bytes, splits by
// newline, and sends log-update messages to runtime. Partial fragments are
// preserved between writes and only complete lines are emitted unless the
// runtime explicitly flushes on close.
func (t *Tracker) LogWriter() io.Writer {
return &logWriter{tracker: t}
}
// setBytesProgress is an internal method used by proxyReader to send
// byte-level progress updates to the model.
func (t *Tracker) setBytesProgress(read, total int64) {
globalRuntime.send(t.id, bytesProgressMsg{read: read, total: total})
}
// appendLog is an internal method used by logWriter to send log data to the
// runtime. The runtime handles newline splitting and partial-line buffering.
func (t *Tracker) appendLog(data string) {
globalRuntime.send(t.id, appendLogMsg(data))
}
// WaitForShutdown blocks until the progress runtime completes teardown or ctx expires.
// Returns nil if runtime completes successfully, ctx.Err() on timeout/cancellation.
// Returns immediately if runtime is not active.
func WaitForShutdown(ctx context.Context) error {
return globalRuntime.waitForShutdown(ctx)
}
package progress
import (
"math"
"github.com/mclucy/lucy/tools"
)
func clamp01(v float64) float64 {
return math.Max(0, math.Min(1, v))
}
func getTrackerWidth(termWidth int) (w int) {
if termWidth <= 0 {
// unset or invalid width
termWidth = tools.TermWidth()
}
w = tools.Ternary(termWidth >= 125, 100, termWidth-50)
w = tools.Ternary(w < 10, 10, w)
return w
}
package progress
import "io"
type proxyReader struct {
io.Reader
tracker *Tracker
total int64
read int64
}
func (r *proxyReader) Read(p []byte) (int, error) {
n, err := r.Reader.Read(p)
r.read += int64(n)
if r.total > 0 {
r.tracker.setBytesProgress(r.read, r.total)
}
return n, err
}
// Package progress runtime manages the bubbletea program lifecycle.
//
// # Lifecycle States
//
// The runtime transitions through states: idle -> running -> stopped.
// The stopped flag is set on interrupt (Ctrl+C) or when all entries complete.
// After all-complete shutdown, new tracker registration resets stopped and restarts.
//
// # Graceful Interrupt
//
// On Ctrl+C, the runtime sets the stopped atomic flag and returns control
// to the caller. The runtime does not call os.Exit - the caller controls
// process lifecycle.
//
// # Idempotent Shutdown
//
// The stopped atomic flag ensures shutdown operations are idempotent.
// Multiple Close() calls or interrupts are safe. Defer-based cleanup in
// the runtime goroutine ensures fields reset on all exit paths.
package progress
import (
"context"
"errors"
"fmt"
"os"
"strings"
"sync"
"sync/atomic"
"charm.land/bubbles/v2/progress"
tea "charm.land/bubbletea/v2"
"charm.land/lipgloss/v2"
"github.com/mclucy/lucy/tools"
)
type entryID int
type entryState struct {
title string
bar progress.Model
message string
percent float64
readBytes int64
totalBytes int64
logLines []string
partialLog string
logCap int
completed bool
}
type entryMsg struct {
id entryID
payload tea.Msg
}
type runtime struct {
program *tea.Program
entries map[entryID]*entryState
entryOrder []entryID
finalMessage string
mu sync.Mutex
running bool
nextID atomic.Int32
done chan struct{}
stopped atomic.Bool
}
func (m *runtime) Init() tea.Cmd { return nil }
func (m *runtime) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.KeyMsg:
if msg.String() == "ctrl+c" {
return m, tea.Interrupt
}
case tea.WindowSizeMsg:
m.mu.Lock()
m.resizeBarsLocked(msg.Width)
m.mu.Unlock()
case entryMsg:
m.mu.Lock()
entry, ok := m.entries[msg.id]
if !ok {
m.mu.Unlock()
return m, nil
}
var cmd tea.Cmd
switch payload := msg.payload.(type) {
case setPercentMsg:
entry.percent = float64(payload)
case incrPercentMsg:
entry.percent = clamp01(entry.percent + float64(payload))
case setMessageMsg:
entry.message = string(payload)
case setTitleMsg:
entry.title = string(payload)
m.resizeBarsLocked(0)
case bytesProgressMsg:
if payload.total > 0 {
entry.percent = float64(payload.read) / float64(payload.total)
}
entry.readBytes = payload.read
entry.totalBytes = payload.total
case appendLogMsg:
entry.partialLog += string(payload)
lines := strings.Split(entry.partialLog, "\n")
if len(lines) > 1 {
entry.logLines = append(entry.logLines, lines[:len(lines)-1]...)
entry.partialLog = lines[len(lines)-1]
if entry.logCap > 0 && len(entry.logLines) > entry.logCap {
entry.logLines = entry.logLines[len(entry.logLines)-entry.logCap:]
}
}
case completeMsg:
entry.percent = 1.0
entry.message = string(payload)
entry.completed = true
// order sensitive, set success colors last so they override global options
options := append(globalOptions, successColorOptions()...)
entry.bar = progress.New(options...)
entry.bar.SetWidth(m.barWidthLocked(0))
if m.allCompleted() {
m.finalMessage = m.buildFinalMessageLocked()
m.mu.Unlock()
return m, tea.Quit
}
case closeMsg:
entry.percent = 1.0
if entry.message == "" {
entry.message = "Done"
}
entry.completed = true
if m.allCompleted() {
m.finalMessage = m.buildFinalMessageLocked()
m.mu.Unlock()
return m, tea.Quit
}
}
m.mu.Unlock()
return m, cmd
}
return m, nil
}
func (m *runtime) View() tea.View {
m.mu.Lock()
defer m.mu.Unlock()
var lines []string
titleWidth := m.maxTitleWidthLocked()
for i := len(m.entryOrder) - 1; i >= 0; i-- {
id := m.entryOrder[i]
entry, ok := m.entries[id]
if !ok {
continue
}
for _, logLine := range entry.logLines {
lines = append(lines, tools.Dim(logLine))
}
var sb strings.Builder
titleCell := lipgloss.NewStyle().Width(titleWidth).Render(entry.title)
sb.WriteString(tools.Bold(tools.Magenta(titleCell)))
sb.WriteString(strings.Repeat(" ", 2))
sb.WriteString(entry.bar.ViewAs(entry.percent))
if entry.totalBytes > 0 {
sb.WriteString(" ")
sb.WriteString(
tools.Dim(
fmt.Sprintf(
"%s / %s",
tools.FormatBytesBinary(entry.readBytes),
tools.FormatBytesBinary(entry.totalBytes),
),
),
)
} else if entry.message != "" {
sb.WriteString(" ")
sb.WriteString(tools.Dim(entry.message))
} else {
sb.WriteString(" ")
sb.WriteString(tools.Dim(fmt.Sprintf("%.1f%%", entry.percent*100)))
}
lines = append(lines, sb.String())
}
if m.finalMessage != "" {
lines = append(lines, "")
lines = append(lines, tools.Green("✓")+" "+tools.Dim(m.finalMessage))
}
return tea.NewView(strings.Join(lines, "\n"))
}
func (m *runtime) allCompleted() bool {
for _, entry := range m.entries {
if !entry.completed {
return false
}
}
return len(m.entries) > 0
}
var globalRuntime = &runtime{
entries: make(map[entryID]*entryState),
}
func (r *runtime) registerEntry(title string, logCapacity int) entryID {
if !tools.IsTerminal {
return 0
}
r.mu.Lock()
canRestart := len(r.entries) == 0 || r.allCompleted()
r.mu.Unlock()
if r.stopped.Load() && !canRestart {
return 0
}
if r.stopped.Load() && canRestart {
r.stopped.Store(false)
}
id := entryID(r.nextID.Add(1))
options := append([]progress.Option(nil), globalOptions...)
r.mu.Lock()
if len(r.entries) > 0 && r.allCompleted() {
r.entries = make(map[entryID]*entryState)
r.entryOrder = nil
r.finalMessage = ""
}
r.entries[id] = &entryState{
title: title,
bar: progress.New(options...),
logCap: logCapacity,
}
r.entryOrder = append(r.entryOrder, id)
r.finalMessage = ""
r.resizeBarsLocked(0)
needStart := !r.running
r.mu.Unlock()
if needStart && !r.stopped.Load() {
r.start()
}
return id
}
func (r *runtime) start() {
r.mu.Lock()
if r.running {
r.mu.Unlock()
return
}
r.running = true
r.done = make(chan struct{})
r.program = tea.NewProgram(r)
r.mu.Unlock()
go func() {
defer close(r.done)
_, err := r.program.Run()
if errors.Is(err, tea.ErrInterrupted) {
os.Exit(130)
}
r.mu.Lock()
r.running = false
r.program = nil
r.mu.Unlock()
}()
}
func (m *runtime) resizeBarsLocked(termWidth int) {
barWidth := m.barWidthLocked(termWidth)
for _, entry := range m.entries {
entry.bar.SetWidth(barWidth)
}
}
func (m *runtime) barWidthLocked(termWidth int) int {
width := getTrackerWidth(termWidth)
barWidth := width - m.maxTitleWidthLocked() - 2
if barWidth < 10 {
return 10
}
return barWidth
}
func (m *runtime) maxTitleWidthLocked() int {
maxWidth := 0
for _, entry := range m.entries {
if width := lipgloss.Width(entry.title); width > maxWidth {
maxWidth = width
}
}
return maxWidth
}
func (m *runtime) buildFinalMessageLocked() string {
if len(m.entryOrder) == 1 {
entry := m.entries[m.entryOrder[0]]
if entry != nil && entry.message != "" {
return entry.message
}
if entry != nil {
return entry.title + " completed"
}
}
count := len(m.entries)
if count == 1 {
return "1 task completed"
}
return fmt.Sprintf("%d tasks completed", count)
}
func (r *runtime) send(id entryID, msg tea.Msg) {
if r.stopped.Load() {
return
}
r.mu.Lock()
running := r.running
program := r.program
r.mu.Unlock()
if running && program != nil {
program.Send(entryMsg{id: id, payload: msg})
}
}
func (r *runtime) waitForShutdown(ctx context.Context) error {
r.mu.Lock()
done := r.done
r.mu.Unlock()
if done == nil {
return nil
}
select {
case <-done:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
package progress
import (
"charm.land/bubbles/v2/progress"
"charm.land/lipgloss/v2"
"github.com/mclucy/lucy/tools"
)
var globalOptions []progress.Option
func init() {
if !tools.IsTerminal {
return
}
globalOptions = append(globalOptions, progress.WithFillCharacters('█', '░'))
}
// colorOptions returns color options lazily, ensuring OSC4 probing
// has been completed first. This is called at first use, not at init time.
func colorOptions() []progress.Option {
tools.EnsureTermColors()
if tools.ValidUserColors {
return []progress.Option{
progress.WithColors(
tools.UserColors[lipgloss.Magenta],
tools.UserColors[lipgloss.BrightMagenta],
),
}
}
return []progress.Option{progress.WithColors(lipgloss.Magenta)}
}
// successColorOptions returns color options for success state,
// lazily ensuring OSC4 probing has been completed first.
func successColorOptions() []progress.Option {
tools.EnsureTermColors()
if tools.ValidUserColors {
return []progress.Option{
progress.WithColors(
tools.UserColors[lipgloss.Magenta],
tools.UserColors[lipgloss.Blue],
tools.UserColors[lipgloss.BrightBlue],
),
}
}
return []progress.Option{progress.WithColors(lipgloss.Blue)}
}
package progress
type logWriter struct {
tracker *Tracker
}
func (w *logWriter) Write(p []byte) (int, error) {
if len(p) > 0 {
w.tracker.appendLog(string(p))
}
return len(p), nil
}
// Package tui is a key-value based commandline output framework.
//
// The core of this package is the Data struct, which holds an array of Field
// values representing different types of output formats. Each Field implements
// the Render() method that returns a formatted string. The Data struct can be
// passed to Flush to print the composed output.
//
// Rendering uses lipgloss-based styling instead of raw ANSI codes, and
// fixed-width key columns instead of tabwriter for simpler, more predictable
// layout.
//
// Note: a field will not show if its content is empty.
package tui
import (
"fmt"
"strconv"
"strings"
lipgloss "charm.land/lipgloss/v2"
"github.com/muesli/reflow/wrap"
"golang.org/x/term"
"github.com/mclucy/lucy/tools"
)
// Data is a collection of Field values to be rendered together.
type Data struct {
Fields []Field
}
// Field is the interface for all renderable output elements. Each
// implementation returns its formatted string representation from Render.
type Field interface {
Render() string
KeyLength() int
}
// FieldSeparator renders a horizontal separator line. A Length of 0 produces
// a line spanning 80% of the terminal width.
//
// Proportional turns the Length value into a percentage of the terminal width
// instead of a character count, so Length=50 with Proportional=true would render
// a line spanning 50% of the terminal width. If Proportional is true, Length
// is treated as a percentage and should be between 0 and 100; values outside
// this range will be clamped.
type FieldSeparator struct {
Length int
Proportional bool
Dim bool
}
func (f *FieldSeparator) KeyLength() int {
return 0
}
func (f *FieldSeparator) Render() string {
if f.Proportional {
f.Length = f.Length * tools.TermWidth() / 100
}
if f.Length == 0 {
f.Length = tools.TermWidth() * 8 / 10
}
return renderSeparator(f.Length, f.Dim)
}
// FieldAnnotation renders a single line of dimmed annotation text.
type FieldAnnotation struct {
Annotation string
}
func (f *FieldAnnotation) KeyLength() int {
return 0
}
func (f *FieldAnnotation) Render() string {
return renderDim(f.Annotation) + "\n"
}
// FieldShortText renders a key-value pair on one line.
type FieldShortText struct {
Title string
Text string
}
func (f *FieldShortText) KeyLength() int {
return len(f.Title)
}
func (f *FieldShortText) Render() string {
return renderKey(f.Title) + f.Text + "\n"
}
// FieldMarkdown renders Markdown content as styled ANSI terminal output.
type FieldMarkdown FieldLongText
func (f *FieldMarkdown) KeyLength() int {
return len(f.Title)
}
func (f *FieldMarkdown) Render() string {
long := FieldLongText(*f)
long.Text = tools.MarkdownToAnsi(f.Text, f.MaxColumns)
long.LineWrap = false
return long.Render() + "\n"
}
// FieldLongText renders multi-line text content with optional word-wrapping
// and line count truncation.
type FieldLongText struct {
Title string
Text string
Padding bool // Padding adds a short separator before the text body
LineWrap bool
MaxColumns int
MaxLines int
UseAlternate bool // UseAlternate shows AlternateText instead of the text body if it is truncated
AlternateText string // AlternateText is shown instead of the text body if it is truncated
FoldNotice string // FoldNotice is a dimmed message shown after the text body if it is truncated, left empty for default message
}
func (f *FieldLongText) KeyLength() int {
return len(f.Title)
}
func (f *FieldLongText) Render() string {
text := f.Text
if f.LineWrap {
text = wrap.String(text, f.MaxColumns)
}
lines := strings.Split(text, "\n")
lineNumber := len(lines)
// lineNumberAnnotation shows the full line count, regardless of truncation.
lineNumberAnnotation := renderDim(
fmt.Sprintf("(total %d lines)", lineNumber),
)
// If MaxLines is set and the text exceeds it, truncate or show alternate text.
truncated := f.MaxLines != 0 && len(lines) > f.MaxLines
if truncated {
// If UseAlternate is true, show AlternateText instead of the truncated text body.
if f.UseAlternate {
if f.AlternateText == "" {
return ""
}
alternateText := FieldShortText{
Title: f.Title,
Text: f.AlternateText + " " + lineNumberAnnotation,
}
rendered := alternateText.Render()
// Use default fold notice if FoldNotice is empty
if f.FoldNotice == "" {
f.FoldNotice = renderDim(fmt.Sprintf("full text not shown, use --long or expand the terminal"))
}
rendered += renderTab() + renderDim(f.FoldNotice)
return rendered
}
// Use default fold notice if FoldNotice is empty
if f.FoldNotice == "" {
f.FoldNotice = fmt.Sprintf(
"...\n%d lines left, use --long or expand the terminal\n",
lineNumber-f.MaxLines,
)
}
f.FoldNotice = renderDim(f.FoldNotice)
// Truncate to MaxLines
lines = lines[:f.MaxLines]
// Append fold notice after truncated text
lines = append(lines, f.FoldNotice)
}
var sb strings.Builder
sb.WriteString(renderKey(f.Title))
sb.WriteString(lineNumberAnnotation)
sb.WriteString("\n")
if f.Padding {
sb.WriteString(renderSeparator(5, false))
}
for _, line := range lines {
sb.WriteString(line)
sb.WriteString("\n")
}
return sb.String()
}
// FieldAnnotatedShortText renders a key-value pair with a dimmed annotation
// placed inline after the value.
type FieldAnnotatedShortText struct {
Title string
Text string
Annotation string
}
func (f *FieldAnnotatedShortText) KeyLength() int {
return len(f.Title)
}
func (f *FieldAnnotatedShortText) Render() string {
var sb strings.Builder
sb.WriteString(renderKey(f.Title))
sb.WriteString(f.Text)
if f.Annotation != "" {
sb.WriteString(renderAnnot(f.Annotation))
}
sb.WriteString("\n")
return sb.String()
}
// FieldNil is a no-op field that renders nothing.
var FieldNil = &fieldNil{}
type fieldNil struct{}
func (f *fieldNil) KeyLength() int {
return 0
}
func (f *fieldNil) Render() string { return "" }
// FieldLabels renders a title followed by a comma-separated list of labels
// that wraps across lines. If MaxWidth is 0, it defaults to
// max(33% of terminal width, 40).
type FieldLabels struct {
Title string
Labels []string
MaxWidth int
MaxLines int
}
func (f *FieldLabels) KeyLength() int {
return len(f.Title)
}
func (f *FieldLabels) Render() string {
if len(f.Labels) == 0 {
return ""
}
var sb strings.Builder
sb.WriteString(renderKey(f.Title))
maxW := f.MaxWidth
if maxW == 0 {
maxW = max(33*tools.TermWidth()/100, 40)
}
width := 0
lines := 1
for i, label := range f.Labels {
sb.WriteString(label)
if i != len(f.Labels)-1 {
sb.WriteString(", ")
}
width += len(label) + 2
if width >= maxW && i != len(f.Labels)-1 {
sb.WriteString("\n")
sb.WriteString(renderTab())
width = 0
lines++
if f.MaxLines != 0 && lines > f.MaxLines {
sb.WriteString(renderDim("(" + strconv.Itoa(len(f.Labels)-i-1) + " more, use --long to show all)"))
sb.WriteString("\n")
return sb.String()
}
}
}
if width != 0 {
sb.WriteString("\n")
}
return sb.String()
}
// FieldDynamicColumnLabels renders labels in a dynamic grid whose column
// count is derived from the terminal width and longest label length.
//
// NoTitle renders a label-only grid without a key column, useful for search
// results and similar content.
type FieldDynamicColumnLabels struct {
Title string
Labels []string
MaxLines int
MaxColumns int
ShowTotal bool
NoTitle bool
}
func (f *FieldDynamicColumnLabels) KeyLength() int {
return len(f.Title)
}
func (f *FieldDynamicColumnLabels) Render() string {
if len(f.Labels) == 0 {
return ""
}
var sb strings.Builder
if !f.NoTitle {
sb.WriteString(renderKey(f.Title))
}
longestLabel := 0
for _, label := range f.Labels {
if len(label) > longestLabel {
longestLabel = len(label)
}
}
colWidth := longestLabel + 2
columnNumber := (tools.TermWidth() - keyColumnWidth) / colWidth
if columnNumber <= 0 {
columnNumber = 1
}
if f.MaxColumns != 0 && columnNumber > f.MaxColumns {
columnNumber = f.MaxColumns
}
currentLine := 1
for i, label := range f.Labels {
lastInRow := (i+1)%columnNumber == 0
lastAmongAll := i == len(f.Labels)-1
padded := label + strings.Repeat(" ", colWidth-len(label))
sb.WriteString(padded)
// If MaxLines is set, and we've reached the limit, show a total count of
// remaining labels and stop rendering more.
if f.MaxLines != 0 && currentLine == f.MaxLines && lastInRow {
sb.WriteString("\n")
if !f.NoTitle {
sb.WriteString(renderTab())
}
if f.ShowTotal {
sb.WriteString(
renderDim(
fmt.Sprintf(
"(%d in total, %d more)",
len(f.Labels),
len(f.Labels)-i-1,
),
),
)
} else {
sb.WriteString(
renderDim(
fmt.Sprintf(
"(%d more)",
len(f.Labels)-i-1,
),
),
)
}
sb.WriteString("\n")
return sb.String()
}
// If this is the last label, optionally show a total count of all labels.
if lastAmongAll {
if f.ShowTotal {
sb.WriteString("\n")
if lastInRow && !f.NoTitle {
sb.WriteString(renderTab())
}
sb.WriteString(
renderDim(
fmt.Sprintf(
"(%d total)",
len(f.Labels),
),
),
)
}
sb.WriteString("\n")
return sb.String()
}
// For the last label in a row, add a newline and indentation for the next row.
if lastInRow {
sb.WriteString("\n")
currentLine++
if !f.NoTitle {
sb.WriteString(renderTab())
}
}
}
return sb.String()
}
// FieldMultiAnnotatedShortText renders multiple annotated lines under one key.
// len(Texts) determines the number of lines; extra entries in Annotations are ignored.
type FieldMultiAnnotatedShortText struct {
Title string
Texts []string
Annotations []string
ShowTotal bool
}
func (f *FieldMultiAnnotatedShortText) KeyLength() int {
return len(f.Title)
}
func (f *FieldMultiAnnotatedShortText) Render() string {
if len(f.Texts) == 0 {
return ""
}
var sb strings.Builder
for i, t := range f.Texts {
if i == 0 {
sb.WriteString(renderKey(f.Title))
} else {
sb.WriteString(renderTab())
}
sb.WriteString(t)
if f.Annotations != nil && i < len(f.Annotations) {
sb.WriteString(renderAnnot(f.Annotations[i]))
}
sb.WriteString("\n")
}
if f.ShowTotal {
sb.WriteString(renderTab())
sb.WriteString(renderDim("(" + strconv.Itoa(len(f.Texts)) + " total)"))
sb.WriteString("\n")
}
return sb.String()
}
// FieldMultiShortText renders multiple values under a single key, one per line.
type FieldMultiShortText struct {
Title string
Texts []string
ShowTotal bool
}
func (f *FieldMultiShortText) KeyLength() int {
return len(f.Title)
}
func (f *FieldMultiShortText) Render() string {
if len(f.Texts) == 0 {
return ""
}
var sb strings.Builder
for i, t := range f.Texts {
if i == 0 {
sb.WriteString(renderKey(f.Title))
} else {
sb.WriteString(renderTab())
}
sb.WriteString(t)
sb.WriteString("\n")
}
if f.ShowTotal {
sb.WriteString(renderTab())
sb.WriteString(renderDim("(" + strconv.Itoa(len(f.Texts)) + " total)"))
sb.WriteString("\n")
}
return sb.String()
}
// FieldCheckBox renders a boolean value as a check mark (✓) or cross (✗).
// Custom TrueText/FalseText override the defaults.
type FieldCheckBox struct {
Title string
Boolean bool
TrueText string
FalseText string
}
func (f *FieldCheckBox) KeyLength() int {
return len(f.Title)
}
func (f *FieldCheckBox) Render() string {
trueText := f.TrueText
if trueText == "" {
trueText = tools.Green("\u2713") // ✓
}
falseText := f.FalseText
if falseText == "" {
falseText = tools.Red("\u2717") // ✗
}
var sb strings.Builder
sb.WriteString(renderKey(f.Title))
if f.Boolean {
sb.WriteString(trueText)
} else {
sb.WriteString(falseText)
}
sb.WriteString("\n")
return sb.String()
}
// clipLines truncates each line in s to at most maxWidth runes.
// A maxWidth <= 0 is treated as "no clipping".
func clipLines(s string, maxWidth int) string {
if maxWidth <= 0 {
return s
}
lines := strings.Split(s, "\n")
for i, l := range lines {
runes := []rune(l)
if len(runes) > maxWidth {
lines[i] = string(runes[:maxWidth])
}
}
return strings.Join(lines, "\n")
}
// Flush renders all fields in data and prints the composed output.
func Flush(data *Data) {
var logoField *FieldLogo
for _, field := range data.Fields {
if fl, ok := field.(*FieldLogo); ok {
logoField = fl
break
}
}
if logoField == nil {
for _, field := range data.Fields {
if field.KeyLength() > keyColumnWidth {
keyColumnWidth = field.KeyLength()
}
}
keyColumnWidth += keyColPadding
var sb strings.Builder
for _, field := range data.Fields {
if field != nil {
sb.WriteString(field.Render())
}
}
sb.WriteString("\n")
fmt.Print(sb.String())
return
}
// LOGO BRANCH: uses a local key width; does not corrupt the global.
localKeyWidth := 0
for _, field := range data.Fields {
if _, ok := field.(*FieldLogo); ok {
continue
}
if field.KeyLength() > localKeyWidth {
localKeyWidth = field.KeyLength()
}
}
localKeyWidth += keyColPadding
// Save/restore keyColumnWidth so existing Render() methods pick up our
// local width without permanently corrupting the global.
savedKeyColumnWidth := keyColumnWidth
keyColumnWidth = localKeyWidth
defer func() { keyColumnWidth = savedKeyColumnWidth }()
var infoSb strings.Builder
for _, field := range data.Fields {
if field == nil {
continue
}
if _, ok := field.(*FieldLogo); ok {
continue
}
infoSb.WriteString(field.Render())
}
infoBlock := infoSb.String()
isTTY := term.IsTerminal(1)
params := NegotiateStatusLayout(
tools.TermWidth(),
logoField.Width(LogoLargePlain),
logoField.Width(LogoSmallPlain),
isTTY,
)
var output string
switch params.Mode {
case LayoutLargeLogoSideBySide, LayoutSmallLogoSideBySide:
variant := LogoLargePlain
if params.Mode == LayoutSmallLogoSideBySide {
variant = LogoSmallPlain
}
logoLines := logoField.Lines(variant)
logoBlock := strings.Join(logoLines, "\n")
gapStr := strings.Repeat(" ", params.GapWidth)
infoStyle := lipgloss.NewStyle().Width(params.InfoWidth)
constrainedInfo := infoStyle.Render(infoBlock)
output = lipgloss.JoinHorizontal(
lipgloss.Top,
logoBlock,
gapStr,
constrainedInfo,
)
case LayoutVertical:
logoLines := logoField.Lines(LogoLargePlain)
output = strings.Join(logoLines, "\n") + "\n\n" + infoBlock
case LayoutClipped:
output = clipLines(infoBlock, params.InfoWidth)
default:
output = infoBlock
}
fmt.Print(output)
fmt.Println()
}
package tui
import (
"strings"
"charm.land/lipgloss/v2"
"github.com/mclucy/lucy/tools"
)
// keyColumnWidth takes the longest key label in the current context and adds
// padding to ensure alignment of values in a two-column layout. It is set by
// while Flush() is rendering the current view.
var keyColumnWidth int
// keyColPadding is the fixed padding (in characters) added to the key column width
// to ensure minimum spacing between keys and values.
const keyColPadding = 2
// renderKey renders a styled key label with fixed-width padding for alignment.
func renderKey(title string) string {
styled := tools.Bold(tools.Magenta(title))
visualWidth := lipgloss.Width(styled)
padding := keyColumnWidth - visualWidth
if padding < 2 {
padding = 2
}
return styled + strings.Repeat(" ", padding)
}
// renderDim renders text with a dimmed/faint style.
func renderDim(text string) string {
return tools.Dim(text)
}
// renderAnnot renders an inline annotation (dimmed, with leading spacing).
func renderAnnot(annotation string) string {
return " " + tools.Dim(annotation)
}
// renderTab returns whitespace matching the key column width, used for
// continuation lines that need to align with the value column.
func renderTab() string {
return strings.Repeat(" ", keyColumnWidth)
}
// renderSeparator returns a horizontal separator line.
//
// A zero-length separator is allowed and will not render anything. However,
// lengths longer than the terminal width will be truncated to fit.
func renderSeparator(length int, dim bool) string {
if length > tools.TermWidth() {
length = tools.TermWidth()
}
sep := strings.Repeat("-", length)
if dim {
return renderDim(sep) + "\n"
}
return sep + "\n"
}
package tui
// StatusLayoutMode describes how the status view arranges the logo and info
// blocks within the available terminal width.
type StatusLayoutMode int
const (
// LayoutLargeLogoSideBySide places the large logo to the left of the
// info block with a gap between them.
LayoutLargeLogoSideBySide StatusLayoutMode = iota
// LayoutSmallLogoSideBySide places the small logo to the left of the
// info block with a gap between them.
LayoutSmallLogoSideBySide
// LayoutVertical stacks the logo above the info block (no
// side-by-side).
LayoutVertical
// LayoutClipped renders only the info block, clipped to the terminal
// width. Used when the terminal is narrower than minInfoWidth.
LayoutClipped
// LayoutInfoOnly renders only the info block with no logo at all.
// Used in non-TTY (piped) contexts.
LayoutInfoOnly
)
const (
// statusLayoutGapWidth is the number of blank columns between the logo
// and info blocks in side-by-side modes.
statusLayoutGapWidth = 3
// statusLayoutMinInfoWidth is the minimum number of columns the info
// block needs to be useful.
statusLayoutMinInfoWidth = 40
)
// StatusLayoutParams holds the result of layout negotiation: the chosen mode
// and the width budgets for each visual element.
type StatusLayoutParams struct {
Mode StatusLayoutMode
LogoWidth int
InfoWidth int
GapWidth int
}
// NegotiateStatusLayout decides which layout mode to use given the terminal
// width, the widths of the two logo variants, and whether the output is a
// TTY. It returns the mode together with pixel-budget details so that the
// compositor can render without further arithmetic.
func NegotiateStatusLayout(termWidth int, logoLargeWidth int, logoSmallWidth int, isTTY bool) StatusLayoutParams {
if !isTTY {
return StatusLayoutParams{
Mode: LayoutInfoOnly,
LogoWidth: 0,
InfoWidth: termWidth,
GapWidth: 0,
}
}
// Large logo side-by-side: logo + gap + info (>= minInfoWidth).
if termWidth >= logoLargeWidth+statusLayoutGapWidth+statusLayoutMinInfoWidth {
return StatusLayoutParams{
Mode: LayoutLargeLogoSideBySide,
LogoWidth: logoLargeWidth,
InfoWidth: termWidth - logoLargeWidth - statusLayoutGapWidth,
GapWidth: statusLayoutGapWidth,
}
}
// Small logo side-by-side.
if termWidth >= logoSmallWidth+statusLayoutGapWidth+statusLayoutMinInfoWidth {
return StatusLayoutParams{
Mode: LayoutSmallLogoSideBySide,
LogoWidth: logoSmallWidth,
InfoWidth: termWidth - logoSmallWidth - statusLayoutGapWidth,
GapWidth: statusLayoutGapWidth,
}
}
// Vertical: logo stacked above info. Requires at least minInfoWidth.
if termWidth >= statusLayoutMinInfoWidth {
return StatusLayoutParams{
Mode: LayoutVertical,
LogoWidth: logoLargeWidth,
InfoWidth: termWidth,
GapWidth: 0,
}
}
// Clipped: terminal too narrow even for minInfoWidth.
return StatusLayoutParams{
Mode: LayoutClipped,
LogoWidth: 0,
InfoWidth: termWidth,
GapWidth: 0,
}
}
package tui
import (
_ "embed"
"strings"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
// LogoVariant selects between the large and small logo variants.
type LogoVariant int
const (
// LogoLargePlain selects the full-size ASCII art logo.
LogoLargePlain LogoVariant = iota
// LogoSmallPlain selects the compact ASCII art logo.
LogoSmallPlain
LogoLargeColored
LogoSmallColored
)
const (
logoSmallMaxWidth = 30
logoLargeMaxWidth = 72
)
// FieldLogo is a Field that holds the ASCII logo for the status view.
// It satisfies the Field interface so it can be placed in Data.Fields,
// but its primary API is the Lines / Width / Height helpers which the
// layout compositor uses to build the neofetch-style side-by-side view.
type FieldLogo struct {
Platform types.Platform // TODO: this is not limited to platform
NoColor bool
}
// Render returns the large logo as a plain string. This is a fallback for
// callers that are not layout-aware and simply iterate over Fields.
func (f *FieldLogo) Render() string {
variant := tools.Ternary(
useLargeLogo(),
tools.Ternary(f.NoColor, LogoLargePlain, LogoLargeColored),
tools.Ternary(f.NoColor, LogoSmallPlain, LogoSmallColored),
)
logo := GetLogo(f.Platform, variant)
return strings.Join(normalizeLines(logo), "\n")
}
// KeyLength returns 0 because the logo is not a key-value field.
func (f *FieldLogo) KeyLength() int {
return 0
}
// Lines returns the normalized lines of the requested logo variant.
// Each line is padded with trailing spaces so that all lines share the
// same width, making grid-based composition straightforward.
func (f *FieldLogo) Lines(variant LogoVariant) []string {
return normalizeLines(GetLogo(f.Platform, variant))
}
// Width returns the uniform width (in runes) of every line for the given
// logo variant.
func (f *FieldLogo) Width(variant LogoVariant) int {
lines := normalizeLines(GetLogo(f.Platform, variant))
if len(lines) == 0 {
return 0
}
return len([]rune(lines[0]))
}
// Height returns the number of lines for the given logo variant.
func (f *FieldLogo) Height(variant LogoVariant) int {
return len(normalizeLines(GetLogo(f.Platform, variant)))
}
func useLargeLogo() bool {
termWidth := tools.TermWidth()
return termWidth >= logoLargeMaxWidth+statusLayoutGapWidth+statusLayoutMinInfoWidth
}
// normalizeLines splits the raw logo text into lines, strips \r characters,
// drops trailing empty lines, and pads every line with spaces so that all
// lines share the same width.
func normalizeLines(raw string) []string {
raw = strings.ReplaceAll(raw, "\r", "")
lines := strings.Split(raw, "\n")
// Trim trailing empty lines.
for len(lines) > 0 && strings.TrimSpace(lines[len(lines)-1]) == "" {
lines = lines[:len(lines)-1]
}
if len(lines) == 0 {
return nil
}
// Find maximum width (in runes).
maxWidth := 0
for _, line := range lines {
if w := len([]rune(line)); w > maxWidth {
maxWidth = w
}
}
// Pad each line to maxWidth.
for i, line := range lines {
runeLen := len([]rune(line))
if runeLen < maxWidth {
lines[i] = line + strings.Repeat(" ", maxWidth-runeLen)
}
}
return lines
}
package tui
import (
_ "embed"
"github.com/mclucy/lucy/types"
)
var (
//go:embed assets/large_plain/fabric.txt
fabricNoColorLarge string
//go:embed assets/small_plain/fabric.txt
fabricNoColorSmall string
//go:embed assets/large_plain/forge.txt
forgeNoColorLarge string
//go:embed assets/small_plain/forge.txt
forgeNoColorSmall string
//go:embed assets/large_plain/neoforge.txt
neoforgeNoColorLarge string
//go:embed assets/small_plain/neoforge.txt
neoforgeNoColorSmall string
)
func GetLogo(platform types.Platform, variant LogoVariant) string {
switch platform {
case types.PlatformFabric:
switch variant {
case LogoLargePlain:
return fabricNoColorLarge
case LogoSmallPlain:
return fabricNoColorSmall
default:
return ""
}
case types.PlatformForge:
switch variant {
case LogoLargePlain:
return forgeNoColorLarge
case LogoSmallPlain:
return forgeNoColorSmall
default:
return ""
}
case types.PlatformNeoforge:
switch variant {
case LogoLargePlain:
return neoforgeNoColorLarge
case LogoSmallPlain:
return neoforgeNoColorSmall
default:
return ""
}
default:
return ""
}
}
package types
// SearchSort controls how providers rank search results.
type SearchSort string
const (
SearchSortRelevance SearchSort = "relevance"
SearchSortDownloads SearchSort = "downloads"
SearchSortNewest SearchSort = "newest"
SearchSortName SearchSort = "name"
)
func (s SearchSort) Valid() bool {
switch s {
case SearchSortRelevance, SearchSortDownloads, SearchSortNewest:
return true
default:
return false
}
}
type SearchOptions struct {
IncludeClient bool
SortBy SearchSort
FilterPlatform Platform
}
type SearchResults struct {
// Source labels which upstream catalog produced this result set.
// It is a semantic provenance marker, not a provider instance.
Source Source
Projects []ProjectName
}
package types
import "strings"
// Source identifies an upstream catalog where package metadata and artifacts can
// be fetched.
//
// Source is a stable semantic identifier used by CLI/config/storage. It is not
// an execution object.
// - In user input, Source can express either a concrete upstream
// (SourceModrinth) or a routing policy marker (SourceAuto).
// - In result payloads, Source records where data came from.
// - In routing, Source is the key that resolves to one or more providers.
//
// Execution of native upstream APIs is implemented by upstream.Provider.
type Source uint8
const (
SourceAuto Source = iota // policy marker: let routing choose providers
SourceCurseForge
SourceModrinth
SourceGitHub
SourceMCDR
SourceHangar
SourceSpiget
SourceUnknown // sentinel for parse/validation failure
)
func (s Source) String() string {
switch s {
case SourceCurseForge:
return "curseforge"
case SourceModrinth:
return "modrinth"
case SourceGitHub:
return "github"
case SourceMCDR:
return "mcdr"
case SourceHangar:
return "hangar"
case SourceSpiget:
return "spiget"
default:
return "unknown"
}
}
func (s Source) Title() string {
switch s {
case SourceCurseForge:
return "CurseForge"
case SourceModrinth:
return "Modrinth"
case SourceGitHub:
return "GitHub"
case SourceMCDR:
return "MCDR"
case SourceHangar:
return "Hangar"
case SourceSpiget:
return "Spiget"
default:
return "Unknown"
}
}
var sourceByString = map[string]Source{
"auto": SourceAuto,
"": SourceAuto,
"curseforge": SourceCurseForge,
"modrinth": SourceModrinth,
"github": SourceGitHub,
"mcdr": SourceMCDR,
"hangar": SourceHangar,
"spiget": SourceSpiget,
"unknown": SourceUnknown,
}
func ParseSource(s string) Source {
s = strings.ToLower(s)
if v, ok := sourceByString[s]; ok {
return v
}
return SourceUnknown
}
package types
// Package types is a general package for all types used in Lucy.
//
// This package contains ONLY pure domain semantics. It must have no side effects:
// - NO logging (logger.)
// - NO filesystem access (os.)
// - NO panics (panic())
//
// All functions should be deterministic and side-effect free.
import (
"github.com/mclucy/lucy/tools"
)
// RawVersion is the version of a package. Here we expect mods and plugins
// use semver (which they should). A known exception is Minecraft snapshots.
//
// There are several special constants for ambiguous(adaptive) versions.
// You MUST call upstream.InferVersion() before parsing them to ComparableVersion.
type RawVersion string
func (v RawVersion) String() string {
switch v {
case VersionAny, "":
return "any"
case VersionNone:
return "none"
case VersionUnknown:
return "unknown"
case VersionLatest:
return "latest"
case VersionCompatible:
return "compatible"
}
return string(v)
}
func (v RawVersion) CanInfer() bool {
switch v {
case VersionAny, VersionLatest, VersionCompatible:
return true
}
return false
}
func (v RawVersion) IsInvalid() bool {
switch v {
case VersionNone, VersionUnknown:
return true
}
return false
}
var (
VersionAny RawVersion = "any"
VersionNone RawVersion = "none"
VersionUnknown RawVersion = "unknown"
VersionLatest RawVersion = "latest"
VersionCompatible RawVersion = "compatible"
)
// ComparableVersion is an interface for comparable parsed versions.
//
// A nil ComparableVersion represents an invalid or unparseable version.
//
// In principle, you cannot compare two versions with different schemes.
// Implementations should return false for cross-scheme comparisons.
type ComparableVersion interface {
// Compare compares this version with v2.
// It returns:
// - -1 if this version < v2
// - 0 if this version == v2
// - 1 if this version > v2
// The second return value is false when the two versions are not comparable
// (for example, cross-scheme comparisons).
Compare(v2 ComparableVersion) (int, bool)
// Validate returns whether this version has valid, non-zero components.
Validate() bool
// Scheme returns the versioning scheme of this version.
Scheme() VersionScheme
}
type VersionScheme uint8
const (
Semver VersionScheme = iota
// MinecraftSnapshot docs:
// https://zh.minecraft.wiki/w/%E7%89%88%E6%9C%AC%E6%A0%BC%E5%BC%8F#%E5%BF%AB%E7%85%A7%EF%BC%88Snapshot%EF%BC%89
// https://www.minecraft.net/en-us/article/minecraft-new-version-numbering-system
MinecraftSnapshot
MinecraftRelease
)
// Dependency represents a dependency requirement for a package.
//
// DO NOT read the Id.Version field. It is supposed to be empty.
//
// Dependency.Constraint is a 2D array. The outer array is OR and the inner
// array is AND. nil/empty means no constraint (all versions acceptable).
//
// Embedded is true when the dependency is physically bundled inside the
// parent JAR (e.g. NeoForge JarInJar / META-INF/jarjar/). Embedded
// dependencies are satisfied without a separate file in the mods directory.
type Dependency struct {
Id PackageId
Constraint VersionConstraintExpression
Mandatory bool
Embedded bool
}
type VersionConstraintExpression [][]VersionConstraint
type VersionConstraint struct {
Value ComparableVersion
Operator VersionOperator
}
// Inverse inverts the version constraint expression in-place.
func (exps VersionConstraintExpression) Inverse() VersionConstraintExpression {
for i := range exps {
for j := range exps[i] {
exps[i][j].Inverse()
}
}
return exps
}
// Inverse inverts the version constraint in-place.
func (exp *VersionConstraint) Inverse() {
switch exp.Operator {
case OpEq:
exp.Operator = OpNeq
case OpNeq:
exp.Operator = OpEq
case OpGt, OpWeakGt:
exp.Operator = OpLte
case OpGte:
exp.Operator = OpLt
case OpLt:
exp.Operator = OpGte
case OpLte:
exp.Operator = OpGt
}
}
func (d Dependency) Satisfy(id PackageId, v ComparableVersion) bool {
if (id.Platform != d.Id.Platform) || (id.Name != d.Id.Name) {
return false
}
if d.Constraint == nil || tools.IsEmptyVector(d.Constraint) {
return true
}
for _, orStatements := range d.Constraint {
satisfied := true
for _, andStatements := range orStatements {
cmp := andStatements.Operator.Comparator()
if v == nil || andStatements.Value == nil || !cmp(
v,
andStatements.Value,
) {
satisfied = false
break
}
}
if satisfied {
return true
}
}
return false
}
type VersionOperator uint8
type VersionComparator func(p1, p2 ComparableVersion) bool
type semverTuple interface {
Major() uint64
Minor() uint64
Patch() uint64
}
func compareByOperator(op VersionOperator, p1, p2 ComparableVersion) bool {
if p1 == nil || p2 == nil {
return false
}
cmp, ok := p1.Compare(p2)
if !ok {
return false
}
switch op {
case OpEq:
return cmp == 0
case OpNeq:
return cmp != 0
case OpGt:
return cmp > 0
case OpGte:
return cmp >= 0
case OpLt:
return cmp < 0
case OpLte:
return cmp <= 0
default:
return false
}
}
func compareSemverWeakEq(p1, p2 ComparableVersion) bool {
if p1 == nil || p2 == nil {
return false
}
if p1.Scheme() != Semver || p2.Scheme() != Semver {
return false
}
candidate, ok1 := p1.(semverTuple)
base, ok2 := p2.(semverTuple)
if !ok1 || !ok2 {
return false
}
if base.Minor() == 0 && base.Patch() == 0 {
return candidate.Major() == base.Major()
}
return candidate.Major() == base.Major() && candidate.Minor() == base.Minor()
}
func compareSemverWeakGt(p1, p2 ComparableVersion) bool {
if p1 == nil || p2 == nil {
return false
}
if p1.Scheme() != Semver || p2.Scheme() != Semver {
return false
}
candidate, ok1 := p1.(semverTuple)
base, ok2 := p2.(semverTuple)
if !ok1 || !ok2 {
return false
}
if candidate.Major() != base.Major() {
return false
}
return compareByOperator(OpGt, p1, p2)
}
var operatorFunctions = map[VersionOperator]VersionComparator{
OpEq: func(p1, p2 ComparableVersion) bool {
return compareByOperator(
OpEq,
p1,
p2,
)
},
OpWeakEq: compareSemverWeakEq,
OpNeq: func(p1, p2 ComparableVersion) bool {
return compareByOperator(
OpNeq,
p1,
p2,
)
},
OpGt: func(p1, p2 ComparableVersion) bool {
return compareByOperator(
OpGt,
p1,
p2,
)
},
OpWeakGt: compareSemverWeakGt,
OpGte: func(p1, p2 ComparableVersion) bool {
return compareByOperator(
OpGte,
p1,
p2,
)
},
OpLt: func(p1, p2 ComparableVersion) bool {
return compareByOperator(
OpLt,
p1,
p2,
)
},
OpLte: func(p1, p2 ComparableVersion) bool {
return compareByOperator(
OpLte,
p1,
p2,
)
},
}
const (
OpEq VersionOperator = iota
OpWeakEq // for ~ operator in semver
OpNeq
OpGt
OpWeakGt // for ^ operator in semver
OpGte
OpLt
OpLte
)
func (op VersionOperator) String() string {
switch op {
case OpEq:
return "equal"
case OpWeakEq:
return "weak equal"
case OpNeq:
return "not equal"
case OpGt:
return "greater than"
case OpWeakGt:
return "weak greater than"
case OpGte:
return "greater than or equal"
case OpLt:
return "less than"
case OpLte:
return "less than or equal"
default:
return "unknown"
}
}
func (op VersionOperator) ToSign() string {
switch op {
case OpEq:
return "="
case OpWeakEq:
return "~"
case OpNeq:
return "!="
case OpGt:
return ">"
case OpWeakGt:
return "^"
case OpGte:
return ">="
case OpLt:
return "<"
case OpLte:
return "<="
default:
return "unknown"
}
}
func (op VersionOperator) Inverse() VersionOperator {
switch op {
case OpEq:
return OpNeq
case OpNeq:
return OpEq
case OpGt, OpWeakGt:
return OpLte
case OpGte:
return OpLt
case OpLt:
return OpGte
case OpLte:
return OpGt
default:
return op
}
}
func (op VersionOperator) Comparator() VersionComparator {
return operatorFunctions[op]
}
// Package types is a general package for all types used in Lucy.
//
// This package contains ONLY pure domain semantics. It must have no side effects:
// - NO logging (logger.)
// - NO filesystem access (os.)
// - NO panics (panic())
//
// All functions should be deterministic and side-effect free.
package types
import (
"fmt"
"strings"
"github.com/mclucy/lucy/tools"
)
// Platform is an enum of several string constants.
//
// All platform is a package under itself, for example, "fabric/fabric" is a
// valid package, and is equivalent to "fabric". This literal is typically used
// when installing/upgrading a platform itself.
type Platform string
const (
PlatformAny Platform = "" // PlatformAny is ambiguous but has single-valueness. It does NOT refer to multiple platforms, but rather a single platform that is unknown. Understand this as PlatformAny reduces to a definite platform at evaluation. Again, keep in mind that you should not allow it to be explicitly evaluated as multiple platforms.
PlatformMinecraft Platform = "minecraft"
PlatformVanilla = PlatformMinecraft // Alias for Minecraft
PlatformFabric Platform = "fabric"
PlatformForge Platform = "forge"
PlatformNeoforge Platform = "neoforge"
PlatformMCDR Platform = "mcdr"
PlatformBukkit Platform = "bukkit" // Can be comsumed by paper/spigot/craftbukkit/etc.
PlatformSponge Platform = "sponge"
PlatformVelocity Platform = "velocity"
PlatformBungeecord Platform = "bungeecord" // Can be consumed by both waterfall and bungeecord itself
PlatformNone Platform = "none" // PlatformNone is a special platform that is not satisfied by any platform, but it can satisfy all platforms. It is typically used to indicate the absence of a platform, for example, when a package is not compatible with any platform, or when a package does not require a platform.
PlatformUnknown Platform = "unknown" // PlatformUnknown is the only constant with no single-valueness, it can refer to multiple platforms other than the ones defined here.
)
func (p Platform) Title() string {
if p == PlatformAny {
return "Any"
}
if p.Valid() {
return strings.ToUpper(string(p)[0:1]) + string(p)[1:]
}
return "Unknown"
}
func (p Platform) String() string {
if p == PlatformAny {
return "any"
}
return string(p)
}
// Valid
//
// If a platform can be used in a package id, it is a valid platform.
func (p Platform) Valid() bool {
switch p {
case PlatformMinecraft, PlatformFabric, PlatformForge, PlatformNeoforge, PlatformMCDR, PlatformBukkit, PlatformAny, PlatformNone:
return true
}
return false
}
func (p Platform) IsSearchPlatform() bool {
switch p {
case PlatformFabric, PlatformForge, PlatformNeoforge, PlatformBukkit:
return true
default:
return false
}
}
// Satisfy returns true if p satisfies the requirement of p2.
func (p Platform) Satisfy(p2 Platform) bool {
// When p2 is PlatformNone, it is satisfied by all platforms.
if p2 == PlatformNone {
return true
}
// PlatformUnknown is not satisfied by any platform, and does not satisfy
// any platform including itself.
if p == PlatformUnknown || p2 == PlatformUnknown {
return false
}
// When p2 is PlatformAny, it is satisfied by all platforms.
if p2 == PlatformAny {
return true
}
// When p is PlatformAny, it does not satisfy any platform except itself.
if p == PlatformAny {
return false
}
// Trivial cases
return p == p2
}
// Is is just an alias for `==`, they are fully interchangeable. There's no
// restriction on which one to use.
//
// This function does not represent a mathematical equivalence relation, since
// PlatformUnknown should always be unequal to any platform including itself.
// However, rather than using .IsUnknown() function, it is more intuitive to
// just use an equality operator.
//
// This is created to differentiate the meaning of "satisfy" and "is".
// For example, "fabric" satisfies "minecraft", but does not "is" "minecraft".
func (p Platform) Is(p2 Platform) bool {
return p == p2
}
func (p Platform) IsModding() bool {
return p == PlatformFabric || p == PlatformForge || p == PlatformNeoforge
}
func DeclaredModdingPlatformForNode(id RuntimeNodeID) Platform {
switch id {
case "fabric":
return PlatformFabric
case "forge", "arclight":
return PlatformForge
case "neoforge", "youer":
return PlatformNeoforge
case "mcdr":
return PlatformMCDR
case "minecraft":
return PlatformMinecraft
default:
return PlatformNone
}
}
// CanInfer returns true if the platform is ambiguous and can be resolved
// from server context.
func (p Platform) CanInfer() bool {
return p == PlatformAny
}
// ProjectName is the slug of the package, using hyphens as separators. For example,
// "fabric-api".
//
// It is non-case-sensitive, though lowercase is recommended. Underlines '_' are
// equivalent to hyphens.
//
// A slug from an upstream API is preferred, if possible. Otherwise, the slug is
// obtained from the executable file. No exceptions since a package must either
// exist on a remote API or user's local files.
type ProjectName string
// Title Replaces underlines or hyphens with spaces, then capitalize the first
// letter.
func (n ProjectName) Title() string {
return tools.Capitalize(strings.ReplaceAll(string(n), "-", " "))
}
func (n ProjectName) String() string {
return string(n)
}
func (n ProjectName) Pep8String() string {
return strings.ReplaceAll(string(n), "-", "_")
}
type PackageId struct {
Platform Platform
Name ProjectName
Version RawVersion
}
func (p PackageId) NewPackage() Package {
return Package{
Id: PackageId{
Platform: p.Platform,
Name: p.Name,
Version: p.Version,
},
}
}
func (p PackageId) String() string {
return tools.Ternary(
p.Platform == PlatformAny,
"", string(p.Platform)+"/",
) +
string(p.Name) +
tools.Ternary(
p.Version == VersionAny,
"",
"@"+string(p.Version),
)
}
func (p PackageId) StringFull() string {
return p.Platform.String() + "/" + p.StringNameVersion()
}
func (p PackageId) StringNameVersion() string {
return string(p.Name) + "@" + p.Version.String()
}
func (p PackageId) StringPlatformName() string {
return string(p.Platform) + "/" + string(p.Name)
}
var platformByIdentityPackage = map[ProjectName]Platform{
"minecraft": PlatformMinecraft,
"mc": PlatformMinecraft,
"fabric": PlatformFabric,
"fabric-loader": PlatformFabric,
"forge": PlatformForge,
"neoforge": PlatformNeoforge,
"mcdreforged": PlatformMCDR,
"mcdr": PlatformMCDR,
}
var canonicalIdentityPackageByPlatform = map[Platform]ProjectName{
PlatformMinecraft: "minecraft",
PlatformFabric: "fabric",
PlatformForge: "forge",
PlatformNeoforge: "neoforge",
PlatformMCDR: "mcdreforged",
}
func (p PackageId) IsIdentityPackage() bool {
_, exists := platformByIdentityPackage[p.Name]
return exists
}
func (p PackageId) IsValidIdentityPackage() error {
if !p.IsIdentityPackage() {
return nil
}
ErrInvalidPlatformPackage := func(p PackageId) error {
return fmt.Errorf(
"mismatch in an identity package: %s under %s",
p.Name,
p.Platform,
)
}
// Check if platform was explicitly specified and mismatches the identity package's platform
if p.Platform != PlatformAny {
expectedPlatform, _ := platformByIdentityPackage[p.Name]
if p.Platform != expectedPlatform {
return ErrInvalidPlatformPackage(p)
}
}
return nil
}
func (p *PackageId) NormalizeIdentityPackage() {
if !p.IsIdentityPackage() {
return
}
platform := p.Platform
if platform == PlatformAny {
inferred, exists := platformByIdentityPackage[p.Name]
if !exists {
return
}
platform = inferred
p.Platform = platform
}
canonicalName, exists := canonicalIdentityPackageByPlatform[platform]
if !exists {
return
}
if p.Name != canonicalName {
p.Name = canonicalName
if p.Version.CanInfer() {
p.Version = VersionCompatible
}
}
}
func (p PackageId) IdentityToPlatform() Platform {
platform, exists := platformByIdentityPackage[p.Name]
if !exists {
return PlatformUnknown
}
return platform
}
package types
type UrlType uint8
const (
UrlFile UrlType = iota
UrlHome
UrlSource
UrlWiki
UrlForum
UrlIssues
UrlSponsor
UrlMisc
)
func (p UrlType) String() string {
switch p {
case UrlFile:
return "File"
case UrlHome:
return "Homepage"
case UrlSource:
return "Source"
case UrlWiki:
return "Wiki"
case UrlMisc:
return "URL"
default:
return "Unknown"
}
}
type Url struct {
Name string
Type UrlType
Url string
}
// Package is a package identifier with its related information. In principle,
// only packages remote and local can provide a Package.
//
// This is an adapter type that uses composition method to provide a unified
// interface for both local and remote packages. It is used to represent a
// package in the system, and can be used to store information about the package
// such as its dependencies, installation path, and remote source.
type Package struct {
// Id is the basic package identifier
Id PackageId
// Package specific data
Dependencies *PackageDependencies
Local *PackageInstallation
Remote *PackageRemote
// Project data
Supports *PlatformSupport
Information *ProjectInformation
}
// PackageDependencies is one of the optional attributions that can be added to
// a Package struct. It is usually used in any command that requires operating
// local packages, such as `lucy install` or `lucy remove`.
type PackageDependencies struct {
Value []Dependency
Authentic bool
}
// ProjectInformation is a struct that contains informational data about the
// package. It is typically used in `lucy info`.
type ProjectInformation struct {
Title string
Brief string
Description string
DescriptionUrl string
DescriptionIsMarkdown bool
Authors []Person
Urls []Url
License string
}
type (
Person struct {
Name string
Role string
Url string
Email string
}
// PackageInstallation is an optional attribution to types.Package. It is
// used for packages that are known to be installed in the local filesystem.
PackageInstallation struct {
Path string
}
// PackageRemote is an optional attribution to types.Package. It is used to
// represent package's presence in a remote source.
PackageRemote struct {
// Source is the semantic origin label of this package metadata/artifact.
// It is stored and displayed as provenance, not used as an executable
// provider identifier.
Source Source
FileUrl string
Filename string
Hash string // upstream-provided digest; empty if unavailable
HashAlgorithm string // e.g. "sha1", "sha512"; empty if Hash is empty
}
// PlatformSupport reflects the support information of the whole project. For
// specific dependency of a single package, use the PackageDependencies struct.
PlatformSupport struct {
MinecraftVersions []RawVersion
Platforms []Platform
Authentic bool
}
)
package types
import (
"os/exec"
"github.com/mclucy/lucy/exttype"
)
// ServerInfo components that do not exist, use an empty string. Note Runtime
// must exist, otherwise the program will exit; therefore, it is not a pointer.
type ServerInfo struct {
WorkPath string `json:"work_path"`
SavePath string `json:"save_path"`
ModPath []string `json:"mod_path"`
Packages []Package `json:"packages"`
Runtime *RuntimeInfo `json:"runtime,omitempty"`
Activity *ServerActivity `json:"activity,omitempty"`
Environments EnvironmentInfo `json:"environments"`
}
type RuntimeInfo struct {
PrimaryEntrance string `json:"primary_entrance"`
GameVersion RawVersion `json:"game_version"`
BootCommand *exec.Cmd `json:"-"`
Topology *RuntimeTopology `json:"topology,omitempty"`
RuntimeIdentities []PackageId `json:"runtime_identities,omitempty"`
BridgeHints []string `json:"bridge_hints,omitempty"`
}
var UnknownExecutable = &RuntimeInfo{
PrimaryEntrance: "",
GameVersion: VersionUnknown,
BootCommand: nil,
Topology: TopologyUnknown,
}
var NoExecutable = &RuntimeInfo{
PrimaryEntrance: "",
GameVersion: VersionNone,
BootCommand: nil,
Topology: TopologyEmpty,
}
func (e *RuntimeInfo) IsValid() bool {
return e != nil && e.Topology != nil
}
func (e *RuntimeInfo) Analyzable() bool {
return e != nil && e.Topology != nil && len(e.RuntimeIdentities) > 0 && e != NoExecutable && e != UnknownExecutable
}
func (e *RuntimeInfo) RuntimeIdentityPackage(node *TopologyNode) *PackageId {
if e == nil || node == nil {
return nil
}
for i := range e.RuntimeIdentities {
pkg := &e.RuntimeIdentities[i]
if string(pkg.Name) == string(node.ID) {
return pkg
}
}
return nil
}
func (e *RuntimeInfo) PrimaryRuntimeIdentity() *PackageId {
if e == nil || e.Topology == nil {
return nil
}
primaryNode, ok := e.Topology.PrimaryNodeData()
if !ok {
return nil
}
return e.RuntimeIdentityPackage(&primaryNode)
}
func (e *RuntimeInfo) DerivedLoaderVersion() string {
primaryIdentity := e.PrimaryRuntimeIdentity()
if primaryIdentity == nil {
return "unknown"
}
return primaryIdentity.Version.String()
}
func (e *RuntimeInfo) DerivedModLoader() Platform {
if e == nil || e.Topology == nil {
return PlatformNone
}
primary, ok := e.Topology.PrimaryNodeData()
if !ok {
return PlatformNone
}
return DeclaredModdingPlatformForNode(primary.ID)
}
func (e *RuntimeInfo) DerivedServerCore() string {
if e == nil || e.Topology == nil {
return ""
}
primary, ok := e.Topology.PrimaryNodeData()
if !ok {
return ""
}
return string(primary.ID)
}
type ServerActivity struct {
Active bool `json:"active"`
Pid int `json:"pid"`
}
type EnvironmentInfo struct {
Lucy *LucyEnv `json:"lucy,omitempty"`
Mcdr *McdrEnv `json:"mcdr,omitempty"`
}
type McdrEnv struct {
Version RawVersion `json:"version"`
Config *exttype.FileMcdrConfig `json:"config,omitempty"`
}
// LucyEnv is a placeholder for Lucy environment; currently just a boolean
// indicating presence, but can be expanded with more details if needed
type LucyEnv bool
package types
import "slices"
type RuntimeNodeID string
const RuntimeNodeUnknown RuntimeNodeID = ""
type RuntimeRole string
const (
RuntimeRoleModLoader RuntimeRole = "mod_loader" // jvm-injecting mod loaders
RuntimeRolePluginCore RuntimeRole = "plugin_core" // cores based on exposed NMS APIs, e.g. craftbukkit derivatives, velocity, sponge. MCDR is included here for now unless there's a strong reason to separate it.
RuntimeRoleHybrid RuntimeRole = "hybrid" // complex runtimes
RuntimeRoleProxy RuntimeRole = "proxy" // proxy servers that do not actually host a Minecraft runtime, e.g. velocity, bungeecord
RuntimeRoleBridge RuntimeRole = "bridge" // bridge layers, e.g. sinytra connector and kilt
RuntimeRoleProtocolBridge RuntimeRole = "protocol_bridge" // Java <-> Bedrock bridges, dedicated for geyser for now
RuntimeRoleVanilla RuntimeRole = "vanilla" // self-explanatory
RuntimeRoleUnknown RuntimeRole = "" // sentinel value
)
type RuntimeCapability string
const (
CapabilityFabricMods RuntimeCapability = "fabric_mods"
CapabilityForgeMods RuntimeCapability = "forge_mods"
CapabilityNeoforgeMods RuntimeCapability = "neoforge_mods"
CapabilityBukkitPlugins RuntimeCapability = "bukkit_plugins"
CapabilityVelocityPlugins RuntimeCapability = "velocity_plugins"
CapabilityBungeecordPlugins RuntimeCapability = "bungeecord_plugins"
CapabilityMCDRPlugins RuntimeCapability = "mcdr_plugins"
CapabilitySpongePlugins RuntimeCapability = "sponge_plugins"
CapabilityProxying RuntimeCapability = "proxying"
CapabilityProtocolBridge RuntimeCapability = "protocol_bridge"
)
type RuntimeRiskLevel int
const (
RiskNone RuntimeRiskLevel = 0
RiskLow RuntimeRiskLevel = 1
RiskMedium RuntimeRiskLevel = 2
RiskHigh RuntimeRiskLevel = 3
RiskCritical RuntimeRiskLevel = 4
)
type CompatVerdict string
const (
CompatCompatible CompatVerdict = "compatible"
CompatDegraded CompatVerdict = "degraded"
CompatIncompatible CompatVerdict = "incompatible"
CompatUnresolved CompatVerdict = "unresolved"
)
// CompatResult reports only the compatibility verdict and its explanation.
// Runtime risk is tracked on topology nodes, not on compat results or edges.
type CompatResult struct {
Verdict CompatVerdict `json:"verdict"`
Reason string `json:"reason"`
Detail string `json:"detail"`
}
// CompatPolicy describes the compatibility relationship between a server runtime
// and package ecosystem. All edges are directed: "can runtime A host packages for ecosystem B?"
type CompatPolicy struct {
// HostNodeID is the runtime that hosts/runs the packages.
HostNodeID RuntimeNodeID `json:"host_node_id"`
// PackageEcosystem is the capability (ecosystem) the packages belong to.
PackageEcosystem RuntimeCapability `json:"package_ecosystem"`
// Verdict is the base verdict for this relationship (without bridge layers).
Verdict CompatVerdict `json:"verdict"`
// Reason is a machine-readable code for why this verdict was reached.
Reason string `json:"reason"`
}
// RuntimeNode describes a materialized runtime layer. RiskLevel is node-scoped and
// may be folded across connected topology components during enrichment.
type RuntimeNode struct {
ID RuntimeNodeID `json:"id"`
Role RuntimeRole `json:"role"`
Capabilities []RuntimeCapability `json:"capabilities"`
RiskLevel RuntimeRiskLevel `json:"risk_level"`
}
type TopologyNode = RuntimeNode
func (n RuntimeNode) HasCapability(c RuntimeCapability) bool {
return slices.Contains(n.Capabilities, c)
}
// RuntimeEdgeVerb describes the type of relationship between two nodes in the topology.
type RuntimeEdgeVerb string
const (
EdgeAdapts RuntimeEdgeVerb = "adapts" // marked for removal; the adapted environment will be represented as a runtime capability rather than a separate node in the topology
EdgeBridges RuntimeEdgeVerb = "bridges" // meaningless; marked for removal
EdgeRoutes RuntimeEdgeVerb = "routes" // meaningless; marked for removal
EdgeHosts RuntimeEdgeVerb = "hosts" // when a node hosts another node, e.g. a neoforge server hosting a sinytra layer
EdgeImplements RuntimeEdgeVerb = "implements" // a full implementation of another runtime, e.g. purpur implementing paper. this type of relationship is sometimes folded into the node's runtime role and capabilities. one example is that paper -> spigot -> craftbukkit, where paper will be a single node with a role of plugin_core and capabilities of both paper, spigot, and craftbukkit rather than separate nodes for each layer. This should be used iff. the from node is a noticeble fork of the to node, e.g. purpur -> paper.
EdgeModifies RuntimeEdgeVerb = "modifies" // dedicated for the edge pointing to the vanilla node
EdgeProxies RuntimeEdgeVerb = "proxies" // this is preserved for the capibility of multi-server modelling, e.g. velocity proxying to a paper server. No actual usage of this verb yet.
)
// RuntimeEdge records only structural relationships between runtime nodes.
// Compatibility severity is expressed via CompatVerdict, while risk remains node-only.
type RuntimeEdge struct {
From RuntimeNodeID `json:"from"`
To RuntimeNodeID `json:"to"`
Verb RuntimeEdgeVerb `json:"verb"`
}
type RuntimeTopology struct {
PrimaryNode RuntimeNodeID `json:"primary_node"`
Nodes []RuntimeNode `json:"nodes"`
Edges []RuntimeEdge `json:"edges"`
}
var (
TopologyEmpty = &RuntimeTopology{}
TopologyUnknown = &RuntimeTopology{
PrimaryNode: "unknown",
Nodes: []RuntimeNode{{ID: "unknown", Role: RuntimeRoleUnknown}},
Edges: nil,
}
)
func (t *RuntimeTopology) Resolved() bool {
return t != nil && t.PrimaryNode != RuntimeNodeUnknown && len(t.Nodes) > 0
}
func (t *RuntimeTopology) FindNode(id RuntimeNodeID) (RuntimeNode, bool) {
if t == nil {
return RuntimeNode{}, false
}
for _, node := range t.Nodes {
if node.ID == id {
return node, true
}
}
return RuntimeNode{}, false
}
func (t *RuntimeTopology) HasCapability(c RuntimeCapability) bool {
if t == nil {
return false
}
for _, node := range t.Nodes {
if node.HasCapability(c) {
return true
}
}
return false
}
func (t *RuntimeTopology) PrimaryNodeData() (RuntimeNode, bool) {
if t == nil {
return RuntimeNode{}, false
}
return t.FindNode(t.PrimaryNode)
}
// EdgesFrom returns all edges originating from a given node.
func (t *RuntimeTopology) EdgesFrom(id RuntimeNodeID) []RuntimeEdge {
if t == nil {
return []RuntimeEdge{}
}
edges := make([]RuntimeEdge, 0)
for _, edge := range t.Edges {
if edge.From == id {
edges = append(edges, edge)
}
}
return edges
}
// EdgesTo returns all edges pointing to a given node.
func (t *RuntimeTopology) EdgesTo(id RuntimeNodeID) []RuntimeEdge {
if t == nil {
return []RuntimeEdge{}
}
edges := make([]RuntimeEdge, 0)
for _, edge := range t.Edges {
if edge.To == id {
edges = append(edges, edge)
}
}
return edges
}
// NodesWithCapability returns all nodes that have the given capability.
func (t *RuntimeTopology) NodesWithCapability(c RuntimeCapability) []RuntimeNode {
if t == nil {
return []RuntimeNode{}
}
nodes := make([]RuntimeNode, 0)
for _, node := range t.Nodes {
if node.HasCapability(c) {
nodes = append(nodes, node)
}
}
return nodes
}
// PrimaryCapabilities returns the capabilities of the primary node only.
// Returns nil if topology is unresolved.
func (t *RuntimeTopology) PrimaryCapabilities() []RuntimeCapability {
if t == nil {
return []RuntimeCapability{}
}
if !t.Resolved() {
return nil
}
primaryNode, ok := t.PrimaryNodeData()
if !ok {
return nil
}
return append([]RuntimeCapability(nil), primaryNode.Capabilities...)
}
// Package curseforge provides functions to interact with CurseForge API.
//
// CurseForge identifies mods by numeric modId, not by slug. Slug resolution
// is done via the search endpoint with the slug query parameter.
//
// All API requests require an x-api-key header. The key is injected at build
// time via ldflags into the ApiKey variable.
package curseforge
import (
"fmt"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
type provider struct{}
var Provider provider
func (provider) Source() types.Source {
return types.SourceCurseForge
}
// Search queries the CurseForge /v1/mods/search endpoint.
func (provider) Search(
query string,
options types.SearchOptions,
) (res upstream.RawSearchResults, err error) {
u := searchUrl(types.ProjectName(query), options)
logger.Debug("searching via curseforge api: " + u)
resp := &searchResponse{}
if err := get(u, resp); err != nil {
return nil, err
}
return resp, nil
}
// Fetch resolves the package version, then fetches the corresponding file.
func (p provider) Fetch(id types.PackageId) (
remote upstream.RawPackageRemote,
err error,
) {
mod, err := resolveSlug(id.Name)
if err != nil {
return nil, err
}
file, err := getFileByDisplayName(mod.Id, string(id.Version), id.Platform)
if err != nil {
return nil, err
}
return file, nil
}
// Information resolves a project slug and returns project metadata.
func (provider) Information(name types.ProjectName) (
info upstream.RawProjectInformation,
err error,
) {
mod, err := resolveSlug(name)
if err != nil {
return nil, err
}
description, err := getModDescription(mod.Id)
if err != nil {
return nil, err
}
return rawProjectInformation{mod: mod, description: description}, nil
}
func (p provider) Dependencies(
id types.PackageId,
) (deps upstream.RawPackageDependencies, err error) {
// Resolve the mod to get the modId
mod, err := resolveSlug(id.Name)
if err != nil {
return nil, err
}
// Get the specific file matching the version
file, err := getFileByDisplayName(mod.Id, string(id.Version), id.Platform)
if err != nil {
return nil, err
}
// If no specific version, get latest release
if file == nil {
file, err = latestCompatibleFile(mod.Id, id.Platform)
if err != nil {
return nil, err
}
}
return &curseforgeDependencies{file: file}, nil
}
// curseforgeDependencies wraps a fileResponse for dependency
// normalization. It implements upstream.RawPackageDependencies.
type curseforgeDependencies struct {
file *fileResponse
}
var _ upstream.RawPackageDependencies = (*curseforgeDependencies)(nil)
func (c *curseforgeDependencies) ToPackageDependencies() types.PackageDependencies {
result := types.PackageDependencies{
Authentic: false,
}
for _, dep := range c.file.Dependencies {
// relationType mapping:
// 1 = EmbeddedLibrary (skip - embedded in the mod itself)
// 2 = OptionalDependency -> Mandatory: false
// 3 = RequiredDependency -> Mandatory: true
// 4 = Tool (skip - not a runtime dependency)
// 5 = Incompatible (skip - breaks compatibility)
// 6 = Include (skip - bundled with the mod)
switch dep.RelationType {
case 2: // OptionalDependency
result.Value = append(result.Value, types.Dependency{
Id: types.PackageId{Name: types.ProjectName(fmt.Sprintf("%d", dep.ModId))},
Mandatory: false,
})
case 3: // RequiredDependency
result.Value = append(result.Value, types.Dependency{
Id: types.PackageId{Name: types.ProjectName(fmt.Sprintf("%d", dep.ModId))},
Mandatory: true,
})
default:
// Skip 1, 4, 5, 6 - not runtime dependencies
continue
}
}
return result
}
func (provider) Support(
name types.ProjectName,
) (supports upstream.RawProjectSupport, err error) {
panic("TODO: implement curseforge provider Support")
}
// ParseAmbiguousId resolves abstract version specifiers (latest,
// compatible, any) to a concrete version by querying the CurseForge API.
func (p provider) ParseAmbiguousId(id types.PackageId) (
parsed types.PackageId,
err error,
) {
if id.Platform.CanInfer() {
// Platform inference removed to avoid circular imports.
// Caller should provide explicit platform.
id.Platform = types.PlatformNone
}
parsed.Platform = id.Platform
parsed.Name = id.Name
var file *fileResponse
switch id.Version {
case types.VersionCompatible:
mod, err := resolveSlug(id.Name)
if err != nil {
return id, err
}
file, err = latestCompatibleFile(mod.Id, id.Platform)
if err != nil {
return id, err
}
case types.VersionAny, types.VersionNone, types.VersionLatest:
mod, err := resolveSlug(id.Name)
if err != nil {
return id, err
}
file, err = latestFile(mod.Id)
if err != nil {
return id, err
}
default:
return id, nil
}
parsed.Version = types.RawVersion(file.FileName)
return parsed, nil
}
package curseforge
import (
"errors"
"fmt"
"net/http"
"sync"
)
var (
availabilityOnce sync.Once
availabilityErr error
)
func AvailabilityError() error {
availabilityOnce.Do(func() {
availabilityErr = validateAvailability()
})
return availabilityErr
}
func Enabled() bool {
return AvailabilityError() == nil
}
func validateAvailability() error {
var resp struct {
Data []struct{} `json:"data"`
}
err := get(baseUrl+"/v1/games", &resp)
if err == nil {
return nil
}
if errors.Is(err, ErrNoApiKey) {
return err
}
var apiErr ApiResponseError
if errors.As(err, &apiErr) {
if apiErr.StatusCode == http.StatusBadRequest ||
apiErr.StatusCode == http.StatusForbidden {
return fmt.Errorf("%w: %w", ErrInvalidApiKey, err)
}
}
return nil
}
package curseforge
import (
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
// --- API response wrappers ---
// searchResponse wraps the CurseForge /v1/mods/search response.
type searchResponse struct {
Data []modResponse `json:"data"`
Pagination pagination `json:"pagination"`
}
func (s *searchResponse) ToSearchResults() types.SearchResults {
res := types.SearchResults{
Source: types.SourceCurseForge,
Projects: make([]types.ProjectName, 0, len(s.Data)),
}
for _, mod := range s.Data {
res.Projects = append(res.Projects, syntax.ToProjectName(mod.Slug))
}
return res
}
// modDataResponse wraps /v1/mods/{modId} response.
type modDataResponse struct {
Data modResponse `json:"data"`
}
type stringDataResponse struct {
Data string `json:"data"`
}
// modResponse is the CurseForge Mod schema.
type modResponse struct {
Id int32 `json:"id"`
GameId int32 `json:"gameId"`
Name string `json:"name"`
Slug string `json:"slug"`
Links modLinks `json:"links"`
Summary string `json:"summary"`
Status int32 `json:"status"`
DownloadCount int64 `json:"downloadCount"`
IsFeatured bool `json:"isFeatured"`
PrimaryCategoryId int32 `json:"primaryCategoryId"`
ClassId *int32 `json:"classId"`
Authors []modAuthor `json:"authors"`
Logo *modAsset `json:"logo"`
MainFileId int32 `json:"mainFileId"`
LatestFiles []fileResponse `json:"latestFiles"`
LatestFilesIndexes []fileIndex `json:"latestFilesIndexes"`
DateCreated string `json:"dateCreated"`
DateModified string `json:"dateModified"`
DateReleased string `json:"dateReleased"`
AllowModDistribution *bool `json:"allowModDistribution"`
GamePopularityRank int32 `json:"gamePopularityRank"`
IsAvailable bool `json:"isAvailable"`
ThumbsUpCount int32 `json:"thumbsUpCount"`
}
type rawProjectInformation struct {
mod *modResponse
description string
}
func (m *modResponse) ToProjectInformation() types.ProjectInformation {
return rawProjectInformation{mod: m}.ToProjectInformation()
}
func (r rawProjectInformation) ToProjectInformation() types.ProjectInformation {
m := r.mod
info := types.ProjectInformation{
Title: m.Name,
Brief: m.Summary,
Description: r.description,
DescriptionIsMarkdown: upstream.LooksLikeMarkdown(r.description),
Urls: make([]types.Url, 0),
Authors: make([]types.Person, 0, len(m.Authors)),
}
if m.Links.WebsiteUrl != "" {
info.Urls = append(info.Urls, types.Url{
Name: "Website",
Type: types.UrlHome,
Url: m.Links.WebsiteUrl,
})
}
if m.Links.WikiUrl != "" {
info.Urls = append(info.Urls, types.Url{
Name: "Wiki",
Type: types.UrlWiki,
Url: m.Links.WikiUrl,
})
}
if m.Links.IssuesUrl != "" {
info.Urls = append(info.Urls, types.Url{
Name: "Issues",
Type: types.UrlIssues,
Url: m.Links.IssuesUrl,
})
}
if m.Links.SourceUrl != "" {
info.Urls = append(info.Urls, types.Url{
Name: "Source",
Type: types.UrlSource,
Url: m.Links.SourceUrl,
})
}
for _, author := range m.Authors {
info.Authors = append(info.Authors, types.Person{
Name: author.Name,
Url: author.Url,
})
}
return info
}
type modLinks struct {
WebsiteUrl string `json:"websiteUrl"`
WikiUrl string `json:"wikiUrl"`
IssuesUrl string `json:"issuesUrl"`
SourceUrl string `json:"sourceUrl"`
}
type modAuthor struct {
Id int32 `json:"id"`
Name string `json:"name"`
Url string `json:"url"`
}
type modAsset struct {
Id int32 `json:"id"`
ModId int32 `json:"modId"`
Title string `json:"title"`
Description string `json:"description"`
ThumbnailUrl string `json:"thumbnailUrl"`
Url string `json:"url"`
}
// --- File-level structs ---
// filesResponse wraps /v1/mods/{modId}/files response.
type filesResponse struct {
Data []fileResponse `json:"data"`
Pagination pagination `json:"pagination"`
}
// fileResponse is the CurseForge File schema.
type fileResponse struct {
Id int32 `json:"id"`
GameId int32 `json:"gameId"`
ModId int32 `json:"modId"`
IsAvailable bool `json:"isAvailable"`
DisplayName string `json:"displayName"`
FileName string `json:"fileName"`
ReleaseType int32 `json:"releaseType"` // 1=Release, 2=Beta, 3=Alpha
FileStatus int32 `json:"fileStatus"`
Hashes []fileHash `json:"hashes"`
FileDate string `json:"fileDate"`
FileLength int64 `json:"fileLength"`
DownloadCount int64 `json:"downloadCount"`
// Docs: https://docs.curseforge.com/rest-api/#get-mod-files
DownloadUrl *string `json:"downloadUrl"` // CAN BE NULL
GameVersions []string `json:"gameVersions"`
Dependencies []fileDependency `json:"dependencies"`
IsServerPack *bool `json:"isServerPack"`
ServerPackFileId *int32 `json:"serverPackFileId"`
}
func (f *fileResponse) ToPackageRemote() types.PackageRemote {
remote := types.PackageRemote{
Source: types.SourceCurseForge,
Filename: f.FileName,
}
if f.DownloadUrl != nil {
remote.FileUrl = *f.DownloadUrl
}
// Prefer SHA1 over MD5 (algo 1=sha1, 2=md5)
for _, h := range f.Hashes {
if h.Algo == 1 {
remote.Hash = h.Value
remote.HashAlgorithm = "sha1"
break
}
}
if remote.Hash == "" {
for _, h := range f.Hashes {
if h.Algo == 2 {
remote.Hash = h.Value
remote.HashAlgorithm = "md5"
break
}
}
}
return remote
}
type fileHash struct {
Value string `json:"value"`
Algo int32 `json:"algo"` // 1=Sha1, 2=Md5
}
type fileDependency struct {
ModId int32 `json:"modId"`
RelationType int32 `json:"relationType"` // 3=Required, 2=Optional, etc.
}
type fileIndex struct {
GameVersion string `json:"gameVersion"`
FileId int32 `json:"fileId"`
Filename string `json:"filename"`
ReleaseType int32 `json:"releaseType"`
GameVersionTypeId *int32 `json:"gameVersionTypeId"`
ModLoader *int32 `json:"modLoader"` // 0=Any, 1=Forge, 4=Fabric, 6=NeoForge
}
type pagination struct {
Index int32 `json:"index"`
PageSize int32 `json:"pageSize"`
ResultCount int32 `json:"resultCount"`
TotalCount int32 `json:"totalCount"`
}
package curseforge
import (
"errors"
"fmt"
)
type ApiResponseError struct {
StatusCode int
}
func (e ApiResponseError) Error() string {
return fmt.Sprintf("curseforge: API returned status %d", e.StatusCode)
}
var (
ErrProjectNotFound = errors.New("curseforge: project not found")
ErrAmbiguousSlug = errors.New("curseforge: ambiguous slug, multiple projects matched")
ErrDownloadNotAllowed = errors.New("curseforge: download not allowed by mod author")
ErrNoCompatibleFile = errors.New("curseforge: no compatible file found")
ErrNoApiKey = errors.New("curseforge: API key not configured")
ErrInvalidApiKey = errors.New("curseforge: API key rejected")
ErrApiResponse = func(statusCode int) error {
return ApiResponseError{StatusCode: statusCode}
}
)
package curseforge
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
)
// fingerprintRequest is the body for POST /v1/fingerprints/432.
type fingerprintRequest struct {
Fingerprints []uint32 `json:"fingerprints"`
}
// fingerprintResponse wraps the /v1/fingerprints/432 response.
// Verified against https://docs.curseforge.com/rest-api/#get-fingerprints-matches
type fingerprintResponse struct {
Data struct {
ExactMatches []struct {
Id uint32 `json:"id"`
File struct {
ModId int32 `json:"modId"`
} `json:"file"`
} `json:"exactMatches"`
} `json:"data"`
}
// SlugFromFilePath computes the CurseForge fingerprint of the file at path,
// queries POST /v1/fingerprints/432, and returns the project slug.
// Returns ("", ErrProjectNotFound) if the file is not found on CurseForge.
func SlugFromFilePath(filePath string) (slug string, err error) {
return SlugFromFilePathWithHint(filePath, "")
}
// SlugFromFilePathWithHint is like SlugFromFilePath but accepts an optional
// urlHint slug. URL hint is never trusted on its own — fingerprint always wins.
func SlugFromFilePathWithHint(filePath, urlHint string) (slug string, err error) {
data, err := os.ReadFile(filePath)
if err != nil {
return "", fmt.Errorf("curseforge hash: %w", err)
}
fp := curseForgeFingerprint(data)
return slugFromFingerprint(fp)
}
func slugFromFingerprint(fp uint32) (string, error) {
body, _ := json.Marshal(fingerprintRequest{Fingerprints: []uint32{fp}})
req, err := http.NewRequest(http.MethodPost, baseUrl+"/v1/fingerprints/432", bytes.NewReader(body))
if err != nil {
return "", err
}
req.Header.Set("x-api-key", ApiKey)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
logger.Debug("curseforge fingerprint lookup")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer tools.CloseReader(resp.Body, logger.Warn)
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("curseforge: fingerprint lookup returned status %d", resp.StatusCode)
}
raw, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
var result fingerprintResponse
if err := json.Unmarshal(raw, &result); err != nil {
return "", ErrProjectNotFound
}
if len(result.Data.ExactMatches) == 0 {
return "", ErrProjectNotFound
}
modId := result.Data.ExactMatches[0].File.ModId
mod, err := getModById(modId)
if err != nil {
return "", err
}
return mod.Slug, nil
}
// curseForgeFingerprint computes the CurseForge custom MurmurHash2 fingerprint.
// Algorithm: strip whitespace bytes (0x09, 0x0A, 0x0D, 0x20), then apply
// a custom MurmurHash2-like mixing with multiplex=1540483477.
// Reference: https://github.com/meza/curseforge-fingerprint
func curseForgeFingerprint(data []byte) uint32 {
const multiplex uint32 = 1540483477
normalizedLen := uint32(0)
for _, b := range data {
if !isCFWhitespace(b) {
normalizedLen++
}
}
h := uint32(1) ^ normalizedLen
var pending uint32
var pendingBits uint32
for _, b := range data {
if isCFWhitespace(b) {
continue
}
pending |= uint32(b) << pendingBits
pendingBits += 8
if pendingBits == 32 {
k := pending * multiplex
k = (k ^ k>>24) * multiplex
h = h*multiplex ^ k
pending = 0
pendingBits = 0
}
}
if pendingBits > 0 {
h = (h ^ pending) * multiplex
}
h = (h ^ h>>13) * multiplex
return h ^ h>>15
}
func isCFWhitespace(b byte) bool {
return b == 0x09 || b == 0x0A || b == 0x0D || b == 0x20
}
package curseforge
import (
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"sync"
"github.com/mclucy/lucy/internal/cipher"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
)
// Docs: https://docs.curseforge.com/rest-api/
const baseUrl = "https://api.curseforge.com"
var (
ApiKey string
apiKeyMut sync.Once
)
// get performs an authenticated GET request to the CurseForge API and
// unmarshals the JSON response into dest.
func get(url string, dest any) error {
apiKeyMut.Do(func() {
key, err := cipher.Decode()
if err != nil {
panic(err)
}
ApiKey = strings.TrimSpace(key)
})
if ApiKey == "" {
return ErrNoApiKey
}
logger.Debug("curseforge api: GET " + url)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return fmt.Errorf("curseforge: failed to create request: %w", err)
}
req.Header.Set("x-api-key", ApiKey)
req.Header.Set("Accept", "application/json")
res, err := http.DefaultClient.Do(req)
if err != nil {
return fmt.Errorf("curseforge: request failed: %w", err)
}
defer tools.CloseReader(res.Body, logger.Warn)
if res.StatusCode != http.StatusOK {
return ErrApiResponse(res.StatusCode)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("curseforge: failed to read response: %w", err)
}
if err := json.Unmarshal(body, dest); err != nil {
return fmt.Errorf("curseforge: failed to parse response: %w", err)
}
return nil
}
package curseforge
func selectLatestReleaseFile(files []fileResponse) *fileResponse {
var latest *fileResponse
for i := range files {
candidate := &files[i]
if !candidate.IsAvailable || candidate.ReleaseType != 1 {
continue
}
if latest == nil || candidate.FileDate > latest.FileDate {
latest = candidate
}
}
return latest
}
func selectFileByVersion(files []fileResponse, version string) *fileResponse {
for i := range files {
candidate := &files[i]
if !candidate.IsAvailable {
continue
}
if candidate.DisplayName == version || candidate.FileName == version {
return candidate
}
}
return nil
}
package curseforge
import (
"github.com/mclucy/lucy/slugmap"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
)
// resolveSlug resolves a project slug to its mod data by searching with the
// slug parameter. CurseForge has no "get by slug" endpoint, so we search with
// the slug query parameter and look for an exact match.
// Docs: https://docs.curseforge.com/rest-api/#search-mods
func resolveSlug(slug types.ProjectName) (*modResponse, error) {
// Canonicalize slug-like name to provider canonical slug before search.
if canonical, ok := slugmap.Default().GetLoose(types.SourceCurseForge, string(slug)); ok {
slug = types.ProjectName(canonical)
}
u := slugSearchUrl(slug)
var resp searchResponse
if err := get(u, &resp); err != nil {
return nil, err
}
if len(resp.Data) == 0 {
return nil, ErrProjectNotFound
}
// If exactly one result, use it.
if len(resp.Data) == 1 {
return &resp.Data[0], nil
}
// Multiple results — find exact slug match.
for i := range resp.Data {
if syntax.ToProjectName(resp.Data[i].Slug) == slug {
return &resp.Data[i], nil
}
}
return nil, ErrAmbiguousSlug
}
// getModById fetches a mod by its numeric ID.
// Docs: https://docs.curseforge.com/rest-api/#get-mod
func getModById(modId int32) (*modResponse, error) {
u := modUrl(modId)
var resp modDataResponse
if err := get(u, &resp); err != nil {
return nil, err
}
return &resp.Data, nil
}
func getModDescription(modId int32) (string, error) {
u := modDescriptionUrl(modId, true)
var resp stringDataResponse
if err := get(u, &resp); err != nil {
return "", err
}
return resp.Data, nil
}
package curseforge
import (
"fmt"
"net/url"
"github.com/mclucy/lucy/types"
)
const (
minecraftGameId = 432
modsClassId = 6
)
// modLoaderType maps lucy Platform to CurseForge ModLoaderType enum.
// Docs: https://docs.curseforge.com/rest-api/#search-mods
func modLoaderType(p types.Platform) int {
switch p {
case types.PlatformForge:
return 1
case types.PlatformFabric:
return 4
case types.PlatformNeoforge:
return 6
default:
return 0 // Any
}
}
// curseforgeSearchSortField maps lucy SearchSort to CurseForge
// ModsSearchSortField enum.
// Docs: https://docs.curseforge.com/rest-api/#search-mods
func curseforgeSearchSortField(sort types.SearchSort) int {
switch sort {
case types.SearchSortRelevance:
return 2 // Popularity
case types.SearchSortDownloads:
return 6 // TotalDownloads
case types.SearchSortNewest:
return 11 // ReleasedDate
case types.SearchSortName:
return 4 // Name
default:
return 2 // Popularity
}
}
// searchSortOrder returns the sort order string for the given sort.
func searchSortOrder(sort types.SearchSort) string {
if sort == types.SearchSortName {
return "asc"
}
return "desc"
}
// searchUrl builds the search URL for the CurseForge /v1/mods/search endpoint.
// Docs: https://docs.curseforge.com/rest-api/#search-mods
func searchUrl(query types.ProjectName, options types.SearchOptions) string {
params := url.Values{}
params.Set("gameId", fmt.Sprintf("%d", minecraftGameId))
params.Set("classId", fmt.Sprintf("%d", modsClassId))
params.Set("searchFilter", string(query))
params.Set("sortField", fmt.Sprintf("%d", curseforgeSearchSortField(options.SortBy)))
params.Set("sortOrder", searchSortOrder(options.SortBy))
params.Set("pageSize", "50")
if loader := modLoaderType(options.FilterPlatform); loader != 0 {
params.Set("modLoaderType", fmt.Sprintf("%d", loader))
}
return baseUrl + "/v1/mods/search?" + params.Encode()
}
// slugSearchUrl builds a URL to find a mod by its exact slug.
// Docs: https://docs.curseforge.com/rest-api/#search-mods
func slugSearchUrl(slug types.ProjectName) string {
params := url.Values{}
params.Set("gameId", fmt.Sprintf("%d", minecraftGameId))
params.Set("classId", fmt.Sprintf("%d", modsClassId))
params.Set("slug", string(slug))
params.Set("pageSize", "50")
return baseUrl + "/v1/mods/search?" + params.Encode()
}
// modUrl builds the URL for getting a mod by its numeric ID.
// Docs: https://docs.curseforge.com/rest-api/#get-mod
func modUrl(modId int32) string {
return fmt.Sprintf("%s/v1/mods/%d", baseUrl, modId)
}
// modDescriptionUrl builds the URL for getting a mod's long description.
// Docs: https://docs.curseforge.com/rest-api/#get-mod-description
func modDescriptionUrl(modId int32, stripped bool) string {
params := url.Values{}
if stripped {
params.Set("stripped", "true")
}
u := fmt.Sprintf("%s/v1/mods/%d/description", baseUrl, modId)
if len(params) == 0 {
return u
}
return u + "?" + params.Encode()
}
// modFilesUrl builds the URL for listing files of a mod, with optional
// filtering by game version and mod loader.
// Docs: https://docs.curseforge.com/rest-api/#get-mod-files
func modFilesUrl(modId int32, gameVersion string, loaderType int) string {
params := url.Values{}
params.Set("pageSize", "50")
if gameVersion != "" {
params.Set("gameVersion", gameVersion)
}
if loaderType != 0 {
params.Set("modLoaderType", fmt.Sprintf("%d", loaderType))
}
return fmt.Sprintf("%s/v1/mods/%d/files?%s", baseUrl, modId, params.Encode())
}
package curseforge
import (
"github.com/mclucy/lucy/types"
)
// listFiles fetches files for a mod with optional filtering by game version
// and mod loader type.
// Docs: https://docs.curseforge.com/rest-api/#get-mod-files
func listFiles(modId int32, gameVersion string, loaderType int) (
[]fileResponse, error,
) {
u := modFilesUrl(modId, gameVersion, loaderType)
var resp filesResponse
if err := get(u, &resp); err != nil {
return nil, err
}
return resp.Data, nil
}
// latestFile finds the latest release file for a mod (no version/platform
// filtering).
func latestFile(modId int32) (*fileResponse, error) {
files, err := listFiles(modId, "", 0)
if err != nil {
return nil, err
}
latest := selectLatestReleaseFile(files)
if latest == nil {
return nil, ErrNoCompatibleFile
}
if latest.DownloadUrl == nil {
return nil, ErrDownloadNotAllowed
}
return latest, nil
}
// latestCompatibleFile finds the latest release file compatible with the
// current server's game version and platform.
func latestCompatibleFile(modId int32, platform types.Platform) (
*fileResponse, error,
) {
// Platform inference removed to avoid circular imports.
// Caller should provide explicit platform or this will use latest.
_ = platform
files, err := listFiles(modId, "", 0)
if err != nil {
return nil, err
}
latest := selectLatestReleaseFile(files)
if latest == nil {
return nil, ErrNoCompatibleFile
}
if latest.DownloadUrl == nil {
return nil, ErrDownloadNotAllowed
}
return latest, nil
}
// getFileByDisplayName finds a file matching a specific version string.
// It checks DisplayName and FileName for a match.
func getFileByDisplayName(
modId int32,
version string,
platform types.Platform,
) (*fileResponse, error) {
loaderType := modLoaderType(platform)
files, err := listFiles(modId, "", loaderType)
if err != nil {
return nil, err
}
selected := selectFileByVersion(files, version)
if selected == nil {
return nil, ErrNoCompatibleFile
}
if selected.DownloadUrl == nil {
return nil, ErrDownloadNotAllowed
}
return selected, nil
}
package githubsource
import (
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
type provider struct{}
var Provider provider
func (provider) Source() types.Source {
return types.SourceGitHub
}
func (provider) Search(
query string,
options types.SearchOptions,
) (res upstream.RawSearchResults, err error) {
panic("TODO: implement github provider Search")
}
func (provider) Fetch(
id types.PackageId,
) (remote upstream.RawPackageRemote, err error) {
panic("TODO: implement github provider Fetch")
}
func (provider) Information(
name types.ProjectName,
) (info upstream.RawProjectInformation, err error) {
panic("TODO: implement github provider Information")
}
func (provider) Dependencies(
id types.PackageId,
) (deps upstream.RawPackageDependencies, err error) {
panic("TODO: implement github provider Dependencies")
}
func (provider) Support(
name types.ProjectName,
) (supports upstream.RawProjectSupport, err error) {
panic("TODO: implement github provider Support")
}
func (provider) ParseAmbiguousId(
id types.PackageId,
) (parsed types.PackageId, err error) {
panic("TODO: implement github provider ParseAmbiguousId")
}
package hangar
import (
"fmt"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
type provider struct{}
var Provider provider
func (provider) Source() types.Source {
return types.SourceHangar
}
func (provider) Search(
query string,
options types.SearchOptions,
) (res upstream.RawSearchResults, err error) {
return searchProjects(query, options)
}
func (p provider) Fetch(id types.PackageId) (
remote upstream.RawPackageRemote,
err error,
) {
version, err := getVersion(id)
if err != nil {
return nil, err
}
preferredPlatform := preferredDownloadPlatform(id.Platform)
if _, ok := version.ToPackageRemoteForPlatform(preferredPlatform); ok {
return version, nil
}
if remote := version.ToPackageRemote(); remote.FileUrl != "" {
return version, nil
}
return nil, ErrNoDownload
}
func (p provider) Information(name types.ProjectName) (
info upstream.RawProjectInformation,
err error,
) {
return getProject(name)
}
func (p provider) Support(name types.ProjectName) (
supports upstream.RawProjectSupport,
err error,
) {
return getProject(name)
}
func (p provider) Dependencies(id types.PackageId) (
deps upstream.RawPackageDependencies,
err error,
) {
version, err := getVersion(id)
if err != nil {
return nil, fmt.Errorf("hangar: dependencies fetch failed: %w", err)
}
return &hangarDependencies{version: version, platform: id.Platform}, nil
}
func (p provider) ParseAmbiguousId(id types.PackageId) (
parsed types.PackageId,
err error,
) {
if id.Platform.CanInfer() {
id.Platform = types.PlatformNone
}
if !id.Version.CanInfer() {
return id, nil
}
version, err := resolveVersion(id)
if err != nil {
return id, err
}
parsed = id
parsed.Version = types.RawVersion(version.Name)
return parsed, nil
}
package hangar
import (
"slices"
"sort"
"strings"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
const hangarSiteBaseURL = "https://hangar.papermc.io"
type hangarProjectRef struct {
Owner string
Slug string
}
func (p hangarProject) ProjectRef() hangarProjectRef {
return p.Namespace.ProjectRef()
}
func (n hangarProjectNamespace) ProjectRef() hangarProjectRef {
return hangarProjectRef{Owner: n.Owner, Slug: n.Slug}
}
func (r hangarProjectRef) CanonicalName() types.ProjectName {
return syntax.ToProjectName(r.Slug)
}
func (r hangarProjectRef) LookupPath() string {
if r.Owner == "" {
return r.Slug
}
return r.Owner + "/" + r.Slug
}
func (r hangarProjectRef) ProjectURL() string {
if r.Owner == "" || r.Slug == "" {
return ""
}
return hangarSiteBaseURL + "/" + r.Owner + "/" + r.Slug
}
func (s *projectSearchResponse) ToSearchResults() types.SearchResults {
res := types.SearchResults{
Source: types.SourceHangar,
Projects: make([]types.ProjectName, 0, len(s.Result)),
}
for _, project := range s.Result {
res.Projects = append(res.Projects, project.ProjectRef().CanonicalName())
}
return res
}
func (p *hangarProject) ToProjectInformation() types.ProjectInformation {
info := types.ProjectInformation{
Title: p.Name,
Brief: p.Description,
Description: p.MainPageContent,
DescriptionIsMarkdown: p.MainPageContent != "" && (upstream.LooksLikeMarkdown(p.MainPageContent) || strings.Contains(p.MainPageContent, "#")),
License: firstNonEmpty(p.Settings.License.Name, p.Settings.License.Type),
Authors: make([]types.Person, 0, len(p.MemberNames)),
Urls: make([]types.Url, 0, len(p.Settings.Links)+1),
}
if projectURL := p.ProjectRef().ProjectURL(); projectURL != "" {
info.Urls = append(info.Urls, types.Url{
Name: "Hangar",
Type: types.UrlHome,
Url: projectURL,
})
}
for _, memberName := range p.MemberNames {
info.Authors = append(info.Authors, types.Person{Name: memberName})
}
for _, section := range p.Settings.Links {
for _, link := range section.Links {
if link.URL == "" {
continue
}
info.Urls = append(info.Urls, types.Url{
Name: link.Name,
Type: classifyHangarURL(link.Name),
Url: link.URL,
})
}
}
return info
}
func (p *hangarProject) ToProjectSupport() types.PlatformSupport {
return platformSupportFromMap(p.SupportedPlatforms)
}
func (v *hangarVersion) ToProjectSupport() types.PlatformSupport {
return platformSupportFromMap(v.PlatformDependencies)
}
func (v *hangarVersion) ToPackageRemote() types.PackageRemote {
remote, _ := v.ToPackageRemoteForPlatform(preferredDownloadPlatform(types.PlatformNone))
if remote.FileUrl == "" {
platforms := sortedMapKeys(v.Downloads)
if len(platforms) == 0 {
return types.PackageRemote{Source: types.SourceHangar}
}
remote, _ = v.ToPackageRemoteForPlatform(types.Platform(strings.ToLower(platforms[0])))
}
return remote
}
func (v *hangarVersion) ToPackageRemoteForPlatform(platform types.Platform) (types.PackageRemote, bool) {
download, ok := v.downloadForPlatform(platform)
if !ok {
return types.PackageRemote{Source: types.SourceHangar}, false
}
remote := types.PackageRemote{
Source: types.SourceHangar,
FileUrl: download.URL(),
Filename: download.FileInfo.Name,
}
if download.FileInfo.SHA256Hash != "" {
remote.Hash = download.FileInfo.SHA256Hash
remote.HashAlgorithm = "sha256"
}
return remote, true
}
func (v *hangarVersion) PluginDependencyNames() []types.ProjectName {
depsForPlatform := v.DependenciesForPlatform(types.PlatformNone)
if len(depsForPlatform) == 0 {
return nil
}
deps := make([]types.ProjectName, 0, len(depsForPlatform))
for _, dep := range depsForPlatform {
if dep.Name == "" || dep.ExternalURL != nil {
continue
}
deps = append(deps, syntax.ToProjectName(dep.Name))
}
slices.Sort(deps)
return deps
}
func (v *hangarVersion) DependenciesForPlatform(platform types.Platform) []hangarPluginDependency {
if len(v.PluginDependencies) == 0 {
return nil
}
preferredKey := strings.ToUpper(preferredDownloadPlatform(platform).String())
if deps := v.PluginDependencies[preferredKey]; len(deps) > 0 {
return deps
}
for _, key := range sortedMapKeys(v.PluginDependencies) {
if deps := v.PluginDependencies[key]; len(deps) > 0 {
return deps
}
}
return nil
}
func (v *hangarVersion) HasDownloadForPlatform(platform types.Platform) bool {
_, ok := v.downloadForPlatform(preferredDownloadPlatform(platform))
if ok {
return true
}
return len(v.Downloads) > 0 && platform == types.PlatformNone
}
func (v *hangarVersion) SupportsPlatform(platform types.Platform) bool {
if len(v.PlatformDependencies) == 0 {
return false
}
preferredKey := strings.ToUpper(preferredDownloadPlatform(platform).String())
if versions := v.PlatformDependencies[preferredKey]; len(versions) > 0 {
return true
}
return platform == types.PlatformNone && len(v.PlatformDependencies) > 0
}
func (v *hangarVersion) downloadForPlatform(platform types.Platform) (hangarDownload, bool) {
if len(v.Downloads) == 0 {
return hangarDownload{}, false
}
needle := strings.ToLower(platform.String())
for key, download := range v.Downloads {
if strings.ToLower(key) == needle {
return download, true
}
}
return hangarDownload{}, false
}
func (d hangarDownload) URL() string {
if d.ExternalURL != nil && *d.ExternalURL != "" {
return *d.ExternalURL
}
return d.DownloadURL
}
func platformSupportFromMap(platformVersions hangarPlatformVersionMap) types.PlatformSupport {
support := types.PlatformSupport{
MinecraftVersions: make([]types.RawVersion, 0),
Platforms: make([]types.Platform, 0, len(platformVersions)),
Authentic: true,
}
seenVersions := make(map[string]struct{})
for _, platformKey := range sortedMapKeys(platformVersions) {
support.Platforms = append(support.Platforms, types.Platform(strings.ToLower(platformKey)))
for _, version := range platformVersions[platformKey] {
if _, exists := seenVersions[version]; exists {
continue
}
seenVersions[version] = struct{}{}
support.MinecraftVersions = append(support.MinecraftVersions, types.RawVersion(version))
}
}
return support
}
func classifyHangarURL(name string) types.UrlType {
switch strings.ToLower(name) {
case "issues":
return types.UrlIssues
case "source":
return types.UrlSource
case "wiki":
return types.UrlWiki
case "support", "discord":
return types.UrlForum
case "website", "homepage", "hangar":
return types.UrlHome
default:
return types.UrlMisc
}
}
func firstNonEmpty(values ...string) string {
for _, value := range values {
if value != "" {
return value
}
}
return ""
}
func sortedMapKeys[T any](m map[string]T) []string {
keys := make([]string, 0, len(m))
for key := range m {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
package hangar
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
const hangarAPIBaseURL = "https://hangar.papermc.io/api/v1"
var (
ErrInvalidAPIResponse = errors.New("hangar: invalid api response")
ErrNoProject = errors.New("hangar: project not found")
ErrNoVersion = errors.New("hangar: version not found")
ErrNoDownload = errors.New("hangar: download not found")
)
func getProject(name types.ProjectName) (*hangarProject, error) {
if project, err := getProjectByPath(string(name)); err == nil {
if project.ProjectRef().CanonicalName() == name {
return project, nil
}
}
search, err := searchProjects(string(name), types.SearchOptions{})
if err != nil {
return nil, err
}
for _, project := range search.Result {
if project.ProjectRef().CanonicalName() == name {
return getProjectByRef(project.ProjectRef())
}
}
return nil, ErrNoProject
}
func getProjectByRef(ref hangarProjectRef) (*hangarProject, error) {
if ref.Owner == "" || ref.Slug == "" {
return nil, ErrNoProject
}
return getProjectByPath(ref.LookupPath())
}
func getProjectByPath(path string) (*hangarProject, error) {
project := &hangarProject{}
if err := getJSON(hangarProjectURL(path), project); err != nil {
return nil, err
}
return project, nil
}
func searchProjects(query string, options types.SearchOptions) (*projectSearchResponse, error) {
params := url.Values{}
if query != "" {
params.Set("query", query)
}
params.Set("limit", "25")
if platform := searchPlatform(options.FilterPlatform); platform != "" {
params.Set("platform", platform)
}
res := &projectSearchResponse{}
if err := getJSON(hangarProjectsURL(params), res); err != nil {
return nil, err
}
return res, nil
}
func getVersion(id types.PackageId) (*hangarVersion, error) {
project, err := getProject(id.Name)
if err != nil {
return nil, err
}
version := &hangarVersion{}
if err := getJSON(hangarVersionURL(project.ProjectRef(), id.Version.String()), version); err != nil {
return nil, err
}
return version, nil
}
func listVersions(name types.ProjectName) ([]hangarVersion, error) {
project, err := getProject(name)
if err != nil {
return nil, err
}
params := url.Values{}
params.Set("limit", "25")
res := &HangarVersionListResponse{}
if err := getJSON(hangarVersionsURL(project.ProjectRef(), params), res); err != nil {
return nil, err
}
if len(res.Result) == 0 {
return nil, ErrNoVersion
}
return res.Result, nil
}
func getJSON(rawURL string, out any) error {
logger.Debug("hangar request: " + rawURL)
res, err := http.Get(rawURL)
if err != nil {
return fmt.Errorf("hangar: request failed: %w", err)
}
defer tools.CloseReader(res.Body, logger.Warn)
if res.StatusCode == http.StatusNotFound {
return ErrNoProject
}
if res.StatusCode != http.StatusOK {
return fmt.Errorf("hangar: unexpected status %d", res.StatusCode)
}
data, err := io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("hangar: failed to read response: %w", err)
}
if err := json.Unmarshal(data, out); err != nil {
return fmt.Errorf("hangar: failed to decode response: %w", err)
}
return nil
}
func hangarProjectsURL(params url.Values) string {
return withQuery(hangarAPIBaseURL+"/projects", params)
}
func hangarProjectURL(path string) string {
return hangarAPIBaseURL + "/projects/" + strings.TrimPrefix(path, "/")
}
func hangarVersionsURL(ref hangarProjectRef, params url.Values) string {
return withQuery(hangarProjectURL(ref.LookupPath())+"/versions", params)
}
func hangarVersionURL(ref hangarProjectRef, version string) string {
return hangarProjectURL(ref.LookupPath()) + "/versions/" + url.PathEscape(version)
}
func withQuery(base string, params url.Values) string {
if len(params) == 0 {
return base
}
return base + "?" + params.Encode()
}
func searchPlatform(platform types.Platform) string {
switch platform {
case types.PlatformBukkit:
return hangarPreferredPlatform
case types.PlatformAny, types.PlatformNone, types.PlatformUnknown:
return hangarPreferredPlatform
default:
return ""
}
}
package hangar
import (
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
const hangarPreferredPlatform = "PAPER"
type hangarDependencies struct {
version *hangarVersion
platform types.Platform
}
var _ upstream.RawPackageDependencies = (*hangarDependencies)(nil)
func (h *hangarDependencies) ToPackageDependencies() types.PackageDependencies {
result := types.PackageDependencies{Authentic: true}
for _, dep := range h.version.DependenciesForPlatform(h.platform) {
if dep.Name == "" || dep.ExternalURL != nil {
continue
}
result.Value = append(result.Value, types.Dependency{
Id: types.PackageId{
Platform: types.PlatformNone,
Name: syntax.ToProjectName(dep.Name),
},
Mandatory: dep.Required,
})
}
return result
}
func resolveVersion(id types.PackageId) (*hangarVersion, error) {
versions, err := listVersions(id.Name)
if err != nil {
return nil, err
}
switch id.Version {
case types.VersionAny, types.VersionNone, types.VersionLatest:
return selectLatestVersion(versions, id.Platform)
case types.VersionCompatible:
return selectLatestCompatibleVersion(versions, id.Platform)
default:
for i := range versions {
if versions[i].Name == id.Version.String() {
return &versions[i], nil
}
}
return nil, ErrNoVersion
}
}
func selectLatestVersion(versions []hangarVersion, platform types.Platform) (*hangarVersion, error) {
if version := firstVersionMatching(versions, platform, false); version != nil {
return version, nil
}
if version := firstVersionMatching(versions, types.PlatformNone, false); version != nil {
return version, nil
}
return nil, ErrNoVersion
}
func selectLatestCompatibleVersion(versions []hangarVersion, platform types.Platform) (*hangarVersion, error) {
if version := firstVersionMatching(versions, platform, true); version != nil {
return version, nil
}
return nil, ErrNoVersion
}
func firstVersionMatching(versions []hangarVersion, platform types.Platform, requireCompatibility bool) *hangarVersion {
for i := range versions {
version := &versions[i]
if !version.HasDownloadForPlatform(platform) {
continue
}
if requireCompatibility && !version.SupportsPlatform(platform) {
continue
}
return version
}
return nil
}
func preferredDownloadPlatform(platform types.Platform) types.Platform {
if platform == types.PlatformAny || platform == types.PlatformNone || platform == types.PlatformUnknown {
return types.Platform("paper")
}
return types.Platform("paper")
}
package mcdr
import (
"fmt"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/probe"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
type provider struct{}
func (s provider) Source() types.Source {
return types.SourceMCDR
}
var Provider provider
// Just a trivial type to implement the SearchResults interface
type mcdrSearchResult []string
func (m mcdrSearchResult) ToSearchResults() types.SearchResults {
var res types.SearchResults
for _, id := range m {
res.Projects = append(res.Projects, syntax.ToProjectName(id))
}
res.Source = types.SourceMCDR
return res
}
// TODO: handle search options
func (s provider) Search(
query string,
options types.SearchOptions,
) (res upstream.RawSearchResults, err error) {
if options.FilterPlatform != types.PlatformMCDR && options.FilterPlatform != types.PlatformAny {
return nil, fmt.Errorf(
"invalid search platform: expected %s, got %s",
types.PlatformMCDR,
options.FilterPlatform,
)
}
res, err = search(query)
return
}
func (s provider) Fetch(id types.PackageId) (
rem upstream.RawPackageRemote,
err error,
) {
rem, err = getRelease(id.Name.Pep8String(), id.Version)
return
}
func (s provider) Information(name types.ProjectName) (
info upstream.RawProjectInformation,
err error,
) {
plugin, err := getInfo(name.Pep8String())
if err != nil {
return nil, err
}
meta, err := getMeta(name.Pep8String())
if err != nil {
return nil, err
}
repo, err := getRepository(name.Pep8String())
if err != nil {
return nil, err
}
info = rawProjectInformation{
Info: plugin,
Meta: meta,
Repository: repo,
}
return info, nil
}
func (s provider) Dependencies(id types.PackageId) (
upstream.RawPackageDependencies,
error,
) {
// TODO implement me
panic("implement me")
}
func (s provider) Support(name types.ProjectName) (
supports upstream.RawProjectSupport,
err error,
) {
// TODO implement me
panic("implement me")
}
func (s provider) ParseAmbiguousId(id types.PackageId) (
parsed types.PackageId,
err error,
) {
var rel *release
switch id.Version {
case types.VersionCompatible:
serverInfo := probe.ServerInfo()
rel, err = getLatestCompatibleRelease(
id.Name.Pep8String(),
serverInfo.Environments.Mcdr.Version,
)
case types.VersionLatest, types.VersionAny:
rel, err = getLatestRelease(id.Name.Pep8String())
if err != nil {
return id, err
}
default:
return id, fmt.Errorf(
"cannot parse version %s for package %s",
id.Version,
id.Name,
)
}
if err != nil {
return id, err
}
parsed = types.PackageId{
Platform: types.PlatformMCDR,
Name: id.Name,
Version: types.RawVersion(rel.Meta.Version),
}
logger.Debug("parsed from" + id.StringFull() + " to " + parsed.StringFull())
return parsed, nil
}
package mcdr
import "errors"
var (
ErrorGhApi = errors.New("error from GitHub API")
ErrPluginNotFound = func(id string) error { return errors.New("plugin not found: " + id) }
ErrVersionNotFound = func(id string, version string) error {
return errors.New("version not found: " + version + " for plugin " + id)
}
)
package mcdr
import (
"github.com/mclucy/lucy/dependency"
"github.com/mclucy/lucy/types"
)
func selectLatestRelease(history *pluginRelease) *release {
if history == nil || len(history.Releases) == 0 {
return nil
}
return &history.Releases[history.LatestVersionIndex]
}
func selectLatestCompatibleRelease(
history *pluginRelease,
localMcdrVersion types.RawVersion,
) (*release, error) {
if history == nil {
return nil, nil
}
mcdrPackage := types.PackageId{
Platform: types.PlatformMCDR,
Name: "mcdreforged",
Version: localMcdrVersion,
}
localVersion, err := dependency.Parse(localMcdrVersion, types.Semver)
if err != nil {
return nil, err
}
for i := range history.Releases {
rel := &history.Releases[i]
rangeExpr, ok := rel.Meta.Dependencies["mcdreforged"]
if !ok {
continue
}
dep := types.Dependency{
Id: mcdrPackage,
Constraint: dependency.ParseRange(
rangeExpr,
dependency.DialectNpmSemver,
types.Semver,
),
Mandatory: true,
}
if dep.Satisfy(mcdrPackage, localVersion) {
return rel, nil
}
}
return nil, nil
}
package mcdr
import (
"encoding/json"
"fmt"
"github.com/mclucy/lucy/github"
"github.com/mclucy/lucy/types"
"github.com/sahilm/fuzzy"
)
const (
pluginCatalogueRepoEndpoint = `https://api.github.com/repos/MCDReforged/PluginCatalogue/contents/`
branchMaster = "?ref=master"
branchCatalogue = "?ref=catalogue" // I haven't figured out the difference yet
branchMeta = "?ref=meta"
)
func search(query string) (mcdrSearchResult, error) {
ghEndpoint := pluginCatalogueRepoEndpoint + ("plugins/") + branchCatalogue
err, msg, items := github.GetDirectoryFromGitHub(ghEndpoint)
if err != nil {
return nil, err
}
if msg != nil && msg.Message != "" {
return nil, fmt.Errorf("%w: %s", ErrorGhApi, msg.Message)
}
pluginIds := make([]string, 0)
for _, file := range items {
pluginIds = append(pluginIds, file.Name)
}
matches := fuzzy.Find(query, pluginIds)
result := make([]string, 0, len(matches))
for _, match := range matches {
result = append(result, pluginIds[match.Index])
}
return result, nil
}
func getInfo(id string) (*pluginInfo, error) {
ghEndpoint := pluginCatalogueRepoEndpoint + ("plugins/") + id + "/plugin_info.json" + branchMaster
var data []byte
err, msg, data := github.GetFileFromGitHub(ghEndpoint)
if err != nil {
return nil, err
}
if msg != nil && msg.Message != "" {
if msg.Status == "404" {
return nil, ErrPluginNotFound(id)
}
return nil, fmt.Errorf("%w: %s", ErrorGhApi, msg.Message)
}
var res github.GhItem
err = json.Unmarshal(data, &res)
if err != nil {
return nil, err
}
var info pluginInfo
err = json.Unmarshal(data, &info)
if err != nil {
return nil, err
}
return &info, nil
}
func getMeta(id string) (*pluginMeta, error) {
ghEndpoint := pluginCatalogueRepoEndpoint + id + "/meta.json" + branchMeta
err, msg, data := github.GetFileFromGitHub(ghEndpoint)
if err != nil {
return nil, err
}
if msg != nil && msg.Message != "" {
if msg.Status == "404" {
return nil, ErrPluginNotFound(id)
}
return nil, fmt.Errorf("%w: %s", ErrorGhApi, msg.Message)
}
var meta pluginMeta
err = json.Unmarshal(data, &meta)
if err != nil {
return nil, err
}
return &meta, nil
}
func getRelease(id string, version types.RawVersion) (*release, error) {
history, err := getReleaseHistory(id)
if err != nil {
return nil, err
}
if version == types.VersionLatest {
return &history.Releases[history.LatestVersionIndex], nil
}
for _, rel := range history.Releases {
if rel.Meta.Version == version.String() {
return &rel, nil
}
}
return nil, ErrVersionNotFound(id, version.String())
}
func getLatestRelease(id string) (*release, error) {
history, err := getReleaseHistory(id)
if err != nil {
return nil, err
}
rel := selectLatestRelease(history)
if rel == nil {
return nil, ErrVersionNotFound(id, "latest")
}
return rel, nil
}
func getLatestCompatibleRelease(id string, localMcdrVersion types.RawVersion) (*release, error) {
history, err := getReleaseHistory(id)
if err != nil {
return nil, err
}
rel, err := selectLatestCompatibleRelease(history, localMcdrVersion)
if err != nil {
return nil, err
}
if rel != nil {
return rel, nil
}
return nil, ErrVersionNotFound(id, "latest compatible")
}
func getReleaseHistory(id string) (*pluginRelease, error) {
ghEndpoint := pluginCatalogueRepoEndpoint + id + "/release.json" + branchMeta
err, msg, data := github.GetFileFromGitHub(ghEndpoint)
if err != nil {
return nil, err
}
if msg != nil && msg.Message != "" {
if msg.Status == "404" {
return nil, ErrPluginNotFound(id)
}
return nil, fmt.Errorf("%w: %s", ErrorGhApi, msg.Message)
}
var releaseHistory pluginRelease
err = json.Unmarshal(data, &releaseHistory)
if err != nil {
return nil, err
}
return &releaseHistory, nil
}
func getRepository(id string) (*pluginRepo, error) {
ghEndpoint := pluginCatalogueRepoEndpoint + id + "/repository.json" + branchMeta
err, msg, data := github.GetFileFromGitHub(ghEndpoint)
if err != nil {
return nil, err
}
if msg != nil && msg.Message != "" {
if msg.Status == "404" {
return nil, ErrPluginNotFound(id)
}
return nil, fmt.Errorf("%w: %s", ErrorGhApi, msg.Message)
}
var repo pluginRepo
err = json.Unmarshal(data, &repo)
if err != nil {
return nil, err
}
return &repo, nil
}
package mcdr
import (
"github.com/mclucy/lucy/dependency"
"github.com/mclucy/lucy/types"
)
func parseRequiredVersion(s string) (reqs []types.VersionConstraint) {
// MCDR metadata dependency requirements are AND criteria split by spaces.
// References:
// - https://docs.mcdreforged.com/en/latest/plugin_dev/metadata.html
// - https://docs.npmjs.com/about-semantic-versioning
expr := dependency.ParseRange(
s,
dependency.InferRangeDialect(types.PlatformMCDR),
types.Semver,
)
if len(expr) == 0 {
return nil
}
return expr[0]
}
package mcdr
import (
"time"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
// SourceGitHub API file ref: https://api.github.com/repos/MCDReforged/PluginCatalogue/contents/plugins/{plugin_name}/plugin_info.json
// The purpose of this file is quite unclear to me.
// For this project, meta.json under the meta branch is more handy.
type pluginInfo struct {
Id string `json:"id"`
Authors []author `json:"authors"`
Repository string `json:"repository"`
Branch string `json:"branch"`
RelatedPath string `json:"related_path"`
Labels []string `json:"labels"`
Introduction struct {
EnUs string `json:"en_us"`
ZhCn string `json:"zh_cn"`
} `json:"introduction"`
}
type author struct {
Name string `json:"name"`
Link string `json:"link"`
}
// SourceGitHub API file ref: https://api.github.com/repos/MCDReforged/PluginCatalogue/{plugin_name}/release.json?ref=meta
type pluginRelease struct {
SchemaVersion int `json:"schema_version"`
Id string `json:"id"`
LatestVersion string `json:"latest_version"`
LatestVersionIndex int `json:"latest_version_index"`
Releases []release `json:"releases"`
}
type release struct {
Url string `json:"url"`
Name string `json:"name"`
TagName string `json:"tag_name"`
CreatedAt time.Time `json:"created_at"`
Description string `json:"description"`
Prerelease bool `json:"prerelease"`
Asset asset `json:"asset"`
Meta pluginMeta `json:"meta"`
}
func (r release) ToPackageRemote() types.PackageRemote {
remote := types.PackageRemote{
Source: types.SourceMCDR,
FileUrl: r.Asset.BrowserDownloadUrl,
Filename: r.Asset.Name,
}
return remote
}
type asset struct {
Id int `json:"id"`
Name string `json:"name"`
Size int `json:"size"`
DownloadCount int `json:"download_count"`
CreatedAt time.Time `json:"created_at"`
BrowserDownloadUrl string `json:"browser_download_url"`
HashMd5 string `json:"hash_md5"`
HashSha256 string `json:"hash_sha256"`
}
// SourceGitHub API file ref: https://api.github.com/repos/MCDReforged/PluginCatalogue/contents/{plugin_name}/meta.json?ref=meta
type pluginMeta struct {
SchemaVersion int `json:"schema_version"`
Id string `json:"id"`
Name string `json:"name"`
Version string `json:"version"`
Link string `json:"link"`
Authors []string `json:"authors"`
Dependencies map[string]string `json:"dependencies"`
Requirements []string `json:"requirements"`
Description struct {
EnUs string `json:"en_us"`
ZhCn string `json:"zh_cn"`
} `json:"description"`
}
// SourceGitHub API file ref: https://api.github.com/repos/MCDReforged/PluginCatalogue/contents/{plugin_name}/repository.json?ref=meta
type pluginRepo struct {
Url string `json:"url"`
Name string `json:"name"`
FullName string `json:"full_name"`
HtmlUrl string `json:"html_url"`
Description string `json:"description"`
Archived bool `json:"archived"`
StargazersCount int `json:"stargazers_count"`
WatchersCount int `json:"watchers_count"`
ForksCount int `json:"forks_count"`
Readme string `json:"readme"`
ReadmeUrl string `json:"readme_url"`
License *struct {
Key string `json:"key"`
Name string `json:"name"`
SpdxId string `json:"spdx_id"`
Url string `json:"url"`
} `json:"license"`
}
// Internal struct to fulfill the upstream.RawProjectInformation interface
type rawProjectInformation struct {
Info *pluginInfo
Meta *pluginMeta
Repository *pluginRepo
}
func (r rawProjectInformation) ToProjectInformation() types.ProjectInformation {
info := types.ProjectInformation{
Title: r.Meta.Name,
Brief: r.Meta.Description.EnUs,
Description: r.Repository.Readme,
DescriptionUrl: r.Repository.HtmlUrl,
DescriptionIsMarkdown: true,
Authors: nil,
Urls: nil,
License: tools.Ternary(
r.Repository.License != nil,
r.Repository.License.Name,
"n/a",
),
}
info.Authors = make([]types.Person, 0)
for _, author := range r.Info.Authors {
info.Authors = append(
info.Authors, types.Person{
Name: author.Name,
Url: author.Link,
},
)
}
info.Urls = make([]types.Url, 0)
info.Urls = append(
info.Urls,
types.Url{
Name: "Plugin Page",
Type: types.UrlHome,
Url: r.Meta.Link,
}, types.Url{
Name: "SourceGitHub Repo",
Type: types.UrlSource,
Url: r.Info.Repository,
},
)
return info
}
// Package modrinth provides functions to interact with Modrinth API.
//
// We use Modrinth terms in private functions:
// - project: A project is a mod, plugin, or resource pack.
// - Version: A version is a release, beta, or alpha version of a project.
//
// Generally, a project in Modrinth is equivalent to a project in Lucy. And
// a version in Modrinth is equivalent to a package in Lucy.
//
// Here, while referring to a project in lucy, we would try to the term "slug"
// to refer to the project (or it's name).
package modrinth
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"path"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
type provider struct{}
func (s provider) Source() types.Source {
return types.SourceModrinth
}
var Provider provider
// Search
//
// For Modrinth search API, see:
// https://docs.modrinth.com/api/operations/searchprojects/
func (s provider) Search(
query string,
options types.SearchOptions,
) (res upstream.RawSearchResults, err error) {
var facets []facetItems
switch options.FilterPlatform {
case types.PlatformForge:
facets = append(facets, facetForgeOnly)
case types.PlatformFabric:
facets = append(facets, facetFabricOnly)
case types.PlatformNeoforge:
facets = append(facets, facetNeoforgeOnly)
case types.PlatformBukkit:
facets = append(facets, facetBukkitOnly)
case types.PlatformAny:
fallthrough
default:
facets = append(facets, facetAllLoaders)
}
if !options.IncludeClient {
facets = append(facets, facetServerSupported)
}
internalOptions := searchOptions{
index: modrinthSearchSortingString(options.SortBy),
facets: facets,
}
searchUrl := searchUrl(types.ProjectName(query), internalOptions)
// Make the call to Modrinth API
logger.Debug("searching via modrinth api: " + searchUrl)
httpRes, err := http.Get(searchUrl)
if err != nil {
return nil, fmt.Errorf("modrinth: search request failed: %w", err)
}
defer tools.CloseReader(httpRes.Body, logger.Warn)
if httpRes.StatusCode != http.StatusOK {
return nil, ErrInvalidAPIResponse
}
data, err := io.ReadAll(httpRes.Body)
if err != nil {
return nil, err
}
res = &searchResultResponse{}
err = json.Unmarshal(data, res)
if err != nil {
return nil, err
}
return res, nil
}
func (s provider) Fetch(id types.PackageId) (
remote upstream.RawPackageRemote,
err error,
) {
version, err := getVersion(id)
if err != nil {
return nil, err
}
if len(version.Files) == 0 || path.Ext(version.Files[0].Filename) != ".jar" {
return nil, ErrUnsupportedFileType
}
return version, nil
}
func (s provider) Information(name types.ProjectName) (
info upstream.RawProjectInformation,
err error,
) {
project, err := getProjectByName(name)
if err != nil {
return nil, err
}
return project, nil
}
// Support from Modrinth API is extremely unreliable. A local check (if any
// files were downloaded) is recommended.
func (s provider) Support(name types.ProjectName) (
supports upstream.RawProjectSupport,
err error,
) {
project, err := getProjectByName(name)
if err != nil {
return nil, err
}
return project, nil
}
var ErrInvalidAPIResponse = errors.New("invalid data from modrinth api")
// Temporary guard: Modrinth can ship non-JAR artifacts such as .mrpack,
// but Lucy does not support installing them yet.
var ErrUnsupportedFileType = errors.New("modrinth: only .jar files are supported")
func (s provider) Dependencies(id types.PackageId) (
deps upstream.RawPackageDependencies,
err error,
) {
version, err := getVersion(id)
if err != nil {
return nil, fmt.Errorf("modrinth: dependencies fetch failed: %w", err)
}
return &modrinthDependencies{version: version, platform: id.Platform}, nil
}
func (s provider) ParseAmbiguousId(p types.PackageId) (
parsed types.PackageId,
err error,
) {
if p.Platform.CanInfer() {
// Platform inference removed to avoid circular imports.
// Caller should provide explicit platform.
p.Platform = types.PlatformNone
}
parsed.Platform = p.Platform
parsed.Name = p.Name
var v *versionResponse
switch p.Version {
case types.VersionCompatible:
v, err = latestCompatibleVersion(p.Name, p.Platform)
case types.VersionAny, types.VersionNone, types.VersionLatest:
v, err = latestVersion(p.Name)
default:
return p, nil
}
if err != nil {
return p, err
}
parsed.Version = types.RawVersion(v.VersionNumber)
return parsed, nil
}
package modrinth
import (
"time"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
// projectResponse
//
// API Example:
// - https://api.modrinth.com/v2/project/P7dR8mSH
// (Fabric API)
// - https://api.modrinth.com/v2/project/1IjD5062
// (Continuity)
type projectResponse struct {
ClientSide string `json:"client_side"`
ServerSide string `json:"server_side"`
GameVersions []string `json:"game_versions"`
Id string `json:"id"`
Slug string `json:"slug"`
ProjectType string `json:"project_type"`
Team string `json:"team"`
Organization string `json:"organization"`
Title string `json:"title"`
Description string `json:"description"`
Body string `json:"body"`
BodyUrl string `json:"body_url"`
Published time.Time `json:"published"`
Updated time.Time `json:"updated"`
Approved time.Time `json:"approved"`
Queued interface{} `json:"queued"`
Status string `json:"status"`
RequestedStatus interface{} `json:"requested_status"`
ModeratorMessage interface{} `json:"moderator_message"`
License struct {
Id string `json:"id"`
Name string `json:"name"`
Url interface{} `json:"url"`
} `json:"license"`
Downloads int `json:"downloads"`
Followers int `json:"followers"`
Categories []string `json:"categories"`
AdditionalCategories []string `json:"additional_categories"`
Loaders []string `json:"loaders"`
Versions []string `json:"versions"`
IconUrl string `json:"icon_url"`
IssuesUrl string `json:"issues_url"`
SourceUrl string `json:"source_url"`
WikiUrl string `json:"wiki_url"`
DiscordUrl string `json:"discord_url"`
DonationUrls []struct {
Id string `json:"id"`
Platform string `json:"platform"`
Url string `json:"url"`
} `json:"donation_urls"`
Gallery []interface{} `json:"gallery"`
Color int `json:"color"`
ThreadId string `json:"thread_id"`
MonetizationStatus string `json:"monetization_status"`
}
func (p *projectResponse) ToProjectSupport() types.PlatformSupport {
supports := types.PlatformSupport{
MinecraftVersions: make([]types.RawVersion, 0),
Platforms: make([]types.Platform, 0),
}
for _, version := range p.GameVersions {
supports.MinecraftVersions = append(
supports.MinecraftVersions,
types.RawVersion(version),
)
}
for _, platform := range p.Loaders {
supports.Platforms = append(
supports.Platforms,
types.Platform(platform),
)
}
return supports
}
func (p *projectResponse) ToProjectInformation() (info types.ProjectInformation) {
info = types.ProjectInformation{
Title: p.Title,
Brief: p.Description,
Description: p.Body,
DescriptionIsMarkdown: upstream.LooksLikeMarkdown(p.Body),
License: p.License.Name,
Urls: make([]types.Url, 0),
}
// Urls
if p.DiscordUrl != "" {
info.Urls = append(
info.Urls, types.Url{
Name: "Discord",
Type: types.UrlForum,
Url: p.DiscordUrl,
},
)
}
if p.IssuesUrl != "" {
info.Urls = append(
info.Urls, types.Url{
Name: "Issues",
Type: types.UrlSource,
Url: "",
},
)
}
if p.SourceUrl != "" {
info.Urls = append(
info.Urls, types.Url{
Name: "Source",
Type: types.UrlSource,
Url: p.SourceUrl,
},
)
}
if p.WikiUrl != "" {
info.Urls = append(
info.Urls, types.Url{
Name: "Wiki",
Type: types.UrlWiki,
Url: p.WikiUrl,
},
)
}
for _, donationUrl := range p.DonationUrls {
info.Urls = append(
info.Urls, types.Url{
Name: donationUrl.Platform,
Type: types.UrlSponsor,
Url: donationUrl.Url,
},
)
}
return info
}
// searchResultResponse
//
// Docs
// https://docs.modrinth.com/api/operations/searchprojects/
//
// Example
// https://api.modrinth.com/v2/search?query=carpet&limit=100&index=relevance&facets=%5B%5B%22server_side:required%22,%22server_side:optional%22%5D%5D
type searchResultResponse struct {
Hits []struct {
ProjectId string `json:"project_id"`
ProjectType string `json:"project_type"`
Slug string `json:"slug"`
Author string `json:"author"`
Title string `json:"title"`
Description string `json:"description"`
Categories []string `json:"categories"`
DisplayCategories []string `json:"display_categories"`
Versions []string `json:"versions"`
Downloads int `json:"downloads"`
Follows int `json:"follows"`
IconUrl string `json:"icon_url"`
DateCreated time.Time `json:"date_created"`
DateModified time.Time `json:"date_modified"`
LatestVersion string `json:"latest_version"`
License string `json:"license"`
ClientSide string `json:"client_side"`
ServerSide string `json:"server_side"`
Gallery []string `json:"gallery"`
FeaturedGallery *string `json:"featured_gallery"`
Color *int `json:"color"`
} `json:"hits"`
Offset int `json:"offset"`
Limit int `json:"limit"`
TotalHits int `json:"total_hits"`
}
func (s *searchResultResponse) ToSearchResults() types.SearchResults {
res := types.SearchResults{
Source: types.SourceModrinth,
Projects: make([]types.ProjectName, 0, s.TotalHits),
}
// The hits should already be sorted by whatever index passed in.
for _, hit := range s.Hits {
res.Projects = append(res.Projects, syntax.ToProjectName(hit.Slug))
}
return res
}
// versionResponse
//
// Docs
// https://docs.modrinth.com/api/operations/getversion/
//
// Example
// https://api.modrinth.com/v2/version/F7LVluUL
type versionResponse struct {
GameVersions []string `json:"game_versions"`
Loaders []string `json:"loaders"`
Id string `json:"id"`
ProjectId string `json:"project_id"`
AuthorId string `json:"author_id"`
Featured bool `json:"featured"`
Name string `json:"name"`
VersionNumber string `json:"version_number"`
Changelog string `json:"changelog"`
ChangelogUrl string `json:"changelog_url"`
DatePublished time.Time `json:"date_published"`
Downloads int `json:"downloads"`
VersionType string `json:"version_type"`
Status string `json:"status"`
RequestedStatus interface{} `json:"requested_status"`
Files []fileResponse `json:"files"`
Dependencies []dependenciesResponse `json:"dependencies"`
}
func (v versionResponse) ToPackageRemote() types.PackageRemote {
remote := types.PackageRemote{
Source: types.SourceModrinth,
FileUrl: v.Files[0].Url,
Filename: v.Files[0].Filename,
}
if h := v.Files[0].Hashes; h.Sha512 != "" {
remote.Hash = h.Sha512
remote.HashAlgorithm = "sha512"
} else if h.Sha1 != "" {
remote.Hash = h.Sha1
remote.HashAlgorithm = "sha1"
}
return remote
}
type dependencyType string
const (
required dependencyType = "required"
optional dependencyType = "optional"
incompatible dependencyType = "incompatible"
embedded dependencyType = "embedded"
)
type fileResponse struct {
Hashes struct {
Sha1 string `json:"sha1"`
Sha512 string `json:"sha512"`
} `json:"hashes"`
Url string `json:"url"`
Filename string `json:"filename"`
Primary bool `json:"primary"`
Size int `json:"size"`
FileType string `json:"file_type"`
}
type dependenciesResponse struct {
VersionId string `json:"version_id"`
ProjectId string `json:"project_id"`
FileName string `json:"file_name"`
DependencyType dependencyType `json:"dependency_type"`
}
// memberResponse
//
// Docs
// https://docs.modrinth.com/api/operations/getprojectteammembers/
//
// Example
// https://api.modrinth.com/v2/project/carpet/members
type memberResponse struct {
Role string `json:"role"`
TeamId string `json:"team_id"`
User userResponse `json:"user"`
Permissions interface{} `json:"permissions"`
Accepted bool `json:"accepted"`
PayoutsSplit interface{} `json:"payouts_split"`
Ordering int `json:"ordering"`
}
// userResponse
//
// # The url can either be an id or username
//
// Example
// https://modrinth.com/user/gnembon
type userResponse struct {
Id string `json:"id"`
Username string `json:"username"`
AvatarUrl string `json:"avatar_url"`
Bio string `json:"bio"`
Created time.Time `json:"created"`
Role string `json:"role"`
Badges int `json:"badges"`
AuthProviders string `json:"auth_providers"`
Email string `json:"email"`
EmailVerified bool `json:"email_verified"`
HasPassword bool `json:"has_password"`
HasTotp bool `json:"has_totp"`
PayoutData string `json:"payout_data"`
StripeCustomerId string `json:"stripe_customer_id"`
AllowFriendRequests bool `json:"allow_friend_requests"`
GithubId string `json:"github_id"`
}
package modrinth
import (
"fmt"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
// modrinthDependencies wraps a Modrinth versionResponse for dependency
// normalization. It implements upstream.RawPackageDependencies.
type modrinthDependencies struct {
version *versionResponse
platform types.Platform
}
var _ upstream.RawPackageDependencies = (*modrinthDependencies)(nil)
func (m *modrinthDependencies) ToPackageDependencies() types.PackageDependencies {
result := types.PackageDependencies{
Authentic: false,
}
for _, dep := range m.version.Dependencies {
if dep.DependencyType == incompatible {
continue
}
parentId := types.PackageId{
Platform: m.platform,
Name: syntax.ToProjectName(m.version.Id),
Version: types.RawVersion(m.version.VersionNumber),
}
depId, err := DependencyToPackage(parentId, &dep)
if err != nil {
logger.ShowInfo(fmt.Sprintf(
"[modrinth] skipping dependency with resolution error: %v", err,
))
continue
}
mandatory := dep.DependencyType == required
result.Value = append(result.Value, types.Dependency{
Id: depId,
Mandatory: mandatory,
})
}
return result
}
package modrinth
import (
"github.com/mclucy/lucy/types"
)
func GetFile(id types.PackageId) (url string, filename string, err error) {
version, err := getVersion(id)
if err != nil {
return "", "", err
}
primary := primaryFile(version.Files)
return primary.Url, primary.Filename, nil
}
func getFile(version *versionResponse) (url string, filename string) {
primary := primaryFile(version.Files)
return primary.Url, primary.Filename
}
func primaryFile(files []fileResponse) (primary fileResponse) {
for _, file := range files {
if file.Primary {
return file
}
}
return files[0]
}
package modrinth
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
)
const versionFileUrlPrefix = "https://api.modrinth.com/v2/version_file/"
// versionFileResponse is the response from GET /v2/version_file/{hash}.
type versionFileResponse struct {
ProjectId string `json:"project_id"`
}
// SlugFromFilePath computes the SHA-1 of the file at path, queries
// Modrinth's single-file hash endpoint, and returns the project slug.
// Returns ("", ENoProject) if the file is not found on Modrinth.
func SlugFromFilePath(filePath string) (slug string, err error) {
return SlugFromFilePathWithHint(filePath, "")
}
// SlugFromFilePathWithHint is like SlugFromFilePath but accepts an optional
// urlHint slug. The hint is verified against the project's version file hashes
// before falling back to the authoritative hash lookup path.
func SlugFromFilePathWithHint(filePath, urlHint string) (slug string, err error) {
sha1hex, err := sha1File(filePath)
if err != nil {
return "", fmt.Errorf("modrinth hash: %w", err)
}
if urlHint != "" && verifySlugBySha1(urlHint, sha1hex) {
return urlHint, nil
}
return SlugFromHash(sha1hex)
}
func verifySlugBySha1(hintSlug, sha1hex string) bool {
u, err := url.JoinPath(projectUrlPrefix, hintSlug, "version")
if err != nil {
return false
}
u += "?include_changelog=false"
logger.Debug("modrinth hint verification: " + u)
resp, err := http.Get(u)
if err != nil {
return false
}
defer tools.CloseReader(resp.Body, logger.Warn)
if resp.StatusCode != http.StatusOK {
return false
}
data, err := io.ReadAll(resp.Body)
if err != nil {
return false
}
var versions []versionResponse
if err := json.Unmarshal(data, &versions); err != nil {
return false
}
for _, version := range versions {
for _, file := range version.Files {
if strings.EqualFold(file.Hashes.Sha1, sha1hex) {
return true
}
}
}
return false
}
// SlugFromHash queries Modrinth for a project by SHA-1 hash using the
// single-file endpoint GET /v2/version_file/{hash}?algorithm=sha1.
func SlugFromHash(sha1hex string) (slug string, err error) {
u := versionFileUrlPrefix + sha1hex + "?algorithm=sha1"
logger.Debug("modrinth hash lookup: " + u)
resp, err := http.Get(u)
if err != nil {
return "", err
}
defer tools.CloseReader(resp.Body, logger.Warn)
if resp.StatusCode == http.StatusNotFound {
return "", ENoProject
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("modrinth: hash lookup returned status %d", resp.StatusCode)
}
data, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
var version versionFileResponse
if err := json.Unmarshal(data, &version); err != nil || version.ProjectId == "" {
return "", ENoProject
}
project, err := getProjectById(version.ProjectId)
if err != nil {
return "", err
}
return project.Slug, nil
}
func sha1File(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
h := sha1.New()
if _, err := io.Copy(h, f); err != nil {
return "", err
}
return hex.EncodeToString(h.Sum(nil)), nil
}
package modrinth
import "github.com/mclucy/lucy/types"
func selectExactVersion(
versions []*versionResponse,
id types.PackageId,
) *versionResponse {
for _, version := range versions {
if types.RawVersion(version.VersionNumber) == id.Version &&
versionSupportsLoader(version, id.Platform) {
return version
}
}
return nil
}
func selectLatestVersionCandidate(
versions []*versionResponse,
platform types.Platform,
) (*versionResponse, bool) {
return selectLatestVersionByLoader(versions, platform, false)
}
func selectLatestCompatibleVersionCandidate(
versions []*versionResponse,
platform types.Platform,
) (*versionResponse, bool) {
return selectLatestVersionByLoader(versions, platform, true)
}
func selectLatestVersionByLoader(
versions []*versionResponse,
platform types.Platform,
filterByLoader bool,
) (*versionResponse, bool) {
selected := latestReleaseVersion(versions, platform, filterByLoader)
if selected != nil {
return selected, false
}
return latestAnyVersion(versions, platform, filterByLoader), true
}
func latestReleaseVersion(
versions []*versionResponse,
platform types.Platform,
filterByLoader bool,
) *versionResponse {
var selected *versionResponse
for _, version := range versions {
if filterByLoader && !versionSupportsLoader(version, platform) {
continue
}
if version.VersionType == "release" &&
(selected == nil || version.DatePublished.After(selected.DatePublished)) {
selected = version
}
}
return selected
}
func latestAnyVersion(
versions []*versionResponse,
platform types.Platform,
filterByLoader bool,
) *versionResponse {
var selected *versionResponse
for _, version := range versions {
if filterByLoader && !versionSupportsLoader(version, platform) {
continue
}
if selected == nil || version.DatePublished.After(selected.DatePublished) {
selected = version
}
}
return selected
}
package modrinth
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"github.com/mclucy/lucy/slugmap"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
)
func getProjectId(slug types.ProjectName) (id string, err error) {
res, err := http.Get(projectUrl(string(slug)))
if err != nil {
return "", fmt.Errorf("modrinth: request failed: %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", ENoProject
}
data, err := io.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("modrinth: failed to read response: %w", err)
}
modrinthProject := projectResponse{}
err = json.Unmarshal(data, &modrinthProject)
if err != nil {
return "", ENoProject
}
id = modrinthProject.Id
return
}
func getProjectById(id string) (project *projectResponse, err error) {
res, err := http.Get(projectUrl(id))
if err != nil {
return nil, fmt.Errorf("modrinth: request failed: %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, ENoProject
}
data, err := io.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("modrinth: failed to read response: %w", err)
}
project = &projectResponse{}
err = json.Unmarshal(data, project)
if err != nil {
return nil, ENoProject
}
return
}
func getProjectByName(slug types.ProjectName) (
project *projectResponse,
err error,
) {
tryFetch := func(target types.ProjectName) (*projectResponse, error) {
res, err := http.Get(projectUrl(string(target)))
if err != nil {
return nil, fmt.Errorf("modrinth: request failed: %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, ENoProject
}
data, err := io.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("modrinth: failed to read response: %w", err)
}
project := &projectResponse{}
if err := json.Unmarshal(data, project); err != nil {
return nil, ENoProject
}
return project, nil
}
project, err = tryFetch(slug)
if err == nil {
return project, nil
}
if canonical, ok := slugmap.Default().GetLoose(types.SourceModrinth, string(slug)); ok && canonical != string(slug) {
return tryFetch(types.ProjectName(canonical))
}
return nil, err
}
func getProjectMembers(id string) (
members []*memberResponse,
err error,
) {
res, err := http.Get(projectMemberUrl(id))
if err != nil {
return nil, fmt.Errorf("modrinth: request failed: %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, ENoMember
}
data, err := io.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("modrinth: failed to read response: %w", err)
}
err = json.Unmarshal(data, &members)
if err != nil {
return nil, ENoMember
}
return members, nil
}
var ErrorInvalidDependency = errors.New("invalid dependency")
func DependencyToPackage(
dependent types.PackageId,
dependency *dependenciesResponse,
) (
p types.PackageId,
err error,
) {
var version *versionResponse
var project *projectResponse
// I don't see a case where a package would depend on a project on another
// platform. So, we can safely assume that the platform of the dependent
// package is the same as the platform of the dependency.
p.Platform = dependent.Platform
if dependency.VersionId != "" && dependency.ProjectId != "" {
version, err = getVersionById(dependency.VersionId)
if err != nil {
return p, fmt.Errorf("resolve dependency version: %w", err)
}
project, err = getProjectById(dependency.ProjectId)
if err != nil {
return p, fmt.Errorf("resolve dependency project: %w", err)
}
} else if dependency.VersionId != "" {
version, err = getVersionById(dependency.VersionId)
if err != nil {
return p, fmt.Errorf("resolve dependency version: %w", err)
}
project, err = getProjectById(version.ProjectId)
if err != nil {
return p, fmt.Errorf("resolve dependency project: %w", err)
}
} else if dependency.ProjectId != "" {
project, err = getProjectById(dependency.ProjectId)
if err != nil {
return p, fmt.Errorf("resolve dependency project: %w", err)
}
// This is not safe, TODO: use better inference method
version, err = latestCompatibleVersion(syntax.ToProjectName(project.Slug), dependent.Platform)
if err != nil {
return p, fmt.Errorf("resolve dependency latest version: %w", err)
}
p.Name = syntax.ToProjectName(project.Slug)
p.Version = types.VersionCompatible
return p, nil
} else {
return p, ErrorInvalidDependency
}
p.Name = syntax.ToProjectName(project.Slug)
p.Version = types.RawVersion(version.VersionNumber)
return p, nil
}
package modrinth
import (
"strings"
)
type searchOptions struct {
index string
facets []facetItems
}
type facetItemOperation uint8
const (
operationEq facetItemOperation = iota
operationNeq
operationLeq
operationGeq
operationLt
operationGt
)
func (op facetItemOperation) String() string {
switch op {
case operationEq:
return ":"
case operationNeq:
return "!="
case operationLeq:
return "<="
case operationGeq:
return ">="
case operationLt:
return "<"
case operationGt:
return ">"
default:
return ""
}
}
// facet is the data structure to construct an advanced Modrinth search. It
// does not contain all the search options, only the ones that are expected in this
// program.
//
// From Modrinth docs:
//
// In order to then use these facets, you need a value to filter by, as well as
// an operation to perform on this value. The most common operation is ':'
// (same as =), though you can also use !=, >=, >, <=, and <. Join together the
// type, operation, and value, and you’ve got your string.
//
// {type} {operation} {value}
//
// categories = adventure
// versions != 1.20.1
// downloads <= 100
//
// You then join these strings together in arrays to signal AND OR operators.
//
// OR
// All elements in a single array are considered to be joined by OR statements.
// For example, the search [["versions:1.16.5", "versions:1.17.1"]] translates
// to Projects that support 1.16.5 OR 1.17.1.
//
// AND
// Separate arrays are considered to be joined by AND statements. For example,
// the search [["versions:1.16.5"], ["project_type:modpack"]] translates to
// Projects that support 1.16.5 AND are modpacks.
//
// API Docs: https://docs.modrinth.com/api/operations/searchprojects/
type facetItem struct {
Type string
Operation facetItemOperation
Value string
}
func (f *facetItem) String() string {
return `"` + f.Type + f.Operation.String() + f.Value + `"`
}
// facetItems is an array of facetItem. It represents an expression joined by OR statements.
// a complete facet is an array of facetItems, with each array joined by AND statements.
type facetItems []facetItem
// There are no facet data structures, rather, a function is used to directly
// create a facet string that can be used in the URL.
func serializeFacet(expressions ...facetItems) string {
var sb strings.Builder
sb.WriteRune('[')
for i, expression := range expressions {
if i > 0 {
sb.WriteRune(',')
}
sb.WriteRune('[')
for j, item := range expression {
if j > 0 {
sb.WriteRune(',')
}
sb.WriteString(item.String())
}
sb.WriteRune(']')
}
sb.WriteRune(']')
return sb.String()
}
var facetAllLoaders = facetItems{
{
Type: "categories",
Operation: operationEq,
Value: "forge",
},
{
Type: "categories",
Operation: operationEq,
Value: "fabric",
},
{
Type: "categories",
Operation: operationEq,
Value: "quilt",
},
{
Type: "categories",
Operation: operationEq,
Value: "liteloader",
},
{
Type: "categories",
Operation: operationEq,
Value: "modloader",
},
{
Type: "categories",
Operation: operationEq,
Value: "rift",
},
{
Type: "categories",
Operation: operationEq,
Value: "neoforge",
},
{
Type: "categories",
Operation: operationEq,
Value: "bukkit",
},
}
var facetForgeOnly = facetItems{
{
Type: "categories",
Operation: operationEq,
Value: "forge",
},
}
var facetFabricOnly = facetItems{
{
Type: "categories",
Operation: operationEq,
Value: "fabric",
},
}
var facetNeoforgeOnly = facetItems{
{
Type: "categories",
Operation: operationEq,
Value: "neoforge",
},
}
var facetBukkitOnly = facetItems{
{
Type: "categories",
Operation: operationEq,
Value: "bukkit",
},
}
var facetServerSupported = facetItems{
{
Type: "server_side",
Operation: operationEq,
Value: "required",
},
{
Type: "server_side",
Operation: operationEq,
Value: "optional",
},
}
var facetClientSupported = facetItems{
{
Type: "client_side",
Operation: operationEq,
Value: "required",
},
{
Type: "client_side",
Operation: operationEq,
Value: "optional",
},
}
var facetBothRequired = []facetItems{
{
{
Type: "server_side",
Operation: operationEq,
Value: "required",
},
},
{
{
Type: "client_side",
Operation: operationEq,
Value: "required",
},
},
}
var facetBothSupported = []facetItems{
{
{
Type: "server_side",
Operation: operationEq,
Value: "required",
},
{
Type: "server_side",
Operation: operationEq,
Value: "optional",
},
},
{
{
Type: "client_side",
Operation: operationEq,
Value: "required",
},
{
Type: "client_side",
Operation: operationEq,
Value: "optional",
},
},
}
package modrinth
import "github.com/mclucy/lucy/types"
func modrinthSearchSortingString(sort types.SearchSort) string {
switch sort {
case types.SearchSortRelevance:
return "relevance"
case types.SearchSortDownloads:
return "downloads"
case types.SearchSortNewest:
return "newest"
default:
return "relevance"
}
}
package modrinth
import (
"net/url"
"strings"
"text/template"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/types"
)
const projectUrlPrefix = "https://api.modrinth.com/v2/project/"
func versionsUrl(slug types.ProjectName) (urlString string) {
urlString, _ = url.JoinPath(
projectUrlPrefix,
string(slug),
"version",
)
return
}
const versionUrlPrefix = `https://api.modrinth.com/v2/version/`
func versionUrl(id string) (urlString string) {
return versionUrlPrefix + url.PathEscape(id)
}
// projectUrl returns the URL for a project with the given SourceModrinth project id
// or slug (package name).
func projectUrl(suffix string) (urlString string) {
return projectUrlPrefix + url.PathEscape(suffix)
}
func projectMemberUrl(suffix string) (urlString string) {
return projectUrl(suffix) + "/members"
}
func projectDependencyUrl(suffix string) (urlString string) {
return projectUrl(suffix) + "/dependencies"
}
const searchUrlTemplate = `https://api.modrinth.com/v2/search?query={{.query}}&limit=100&index={{.index}}&facets={{.facets}}`
func searchUrl(
query types.ProjectName,
option searchOptions,
) (urlString string) {
urlTemplate, _ := template.New("modrinth_search_url").Parse(searchUrlTemplate)
urlBuilder := strings.Builder{}
err := urlTemplate.Execute(
&urlBuilder,
map[string]any{
"query": url.QueryEscape(string(query)),
"index": option.index,
"facets": url.QueryEscape(serializeFacet(option.facets...)),
},
)
if err != nil {
logger.Error(err)
}
urlString = urlBuilder.String()
return urlString
}
const userHomepageUrlPrefix = `https://modrinth.com/user/`
// userHomepageUrl's suffix is the user's username or id.
func userHomepageUrl(suffix string) (urlString string) {
return userHomepageUrlPrefix + suffix
}
package modrinth
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/slugmap"
"github.com/mclucy/lucy/types"
)
// TODO: Refactor to separate all API functions to accept an url. While the urls
// are generated by other functions. This will make the code more modular and
// easier to test.
var (
ENoVersion = errors.New("modrinth version not found")
ENoProject = errors.New("modrinth project not found")
ENoMember = errors.New("modrinth project memberResponse not found")
)
// TODO: This has a chance of causing segmentation faults
func listVersions(slug types.ProjectName) (
versions []*versionResponse,
err error,
) {
tryFetch := func(target types.ProjectName) ([]*versionResponse, error) {
res, err := http.Get(versionsUrl(target))
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, ENoProject
}
data, err := io.ReadAll(res.Body)
if err != nil {
return nil, err
}
var out []*versionResponse
if err := json.Unmarshal(data, &out); err != nil {
return nil, ENoProject
}
return out, nil
}
versions, err = tryFetch(slug)
if err == nil {
return versions, nil
}
if canonical, ok := slugmap.Default().GetLoose(types.SourceModrinth, string(slug)); ok && canonical != string(slug) {
return tryFetch(types.ProjectName(canonical))
}
return nil, err
}
// getVersion is named as so because a Package in lucy is equivalent to a version
// in SourceModrinth.
func getVersion(id types.PackageId) (
v *versionResponse,
err error,
) {
versions, err := listVersions(id.Name)
if err != nil {
return nil, err
}
if id.Version == types.VersionLatest {
v, err = latestVersion(id.Name)
if err != nil {
return nil, err
}
return v, nil
}
if selected := selectExactVersion(versions, id); selected != nil {
return selected, nil
}
return nil, ENoVersion
}
func getVersionById(id string) (v *versionResponse, err error) {
res, err := http.Get(versionUrl(id))
if err != nil {
return nil, fmt.Errorf("modrinth: request failed: %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, ENoVersion
}
data, err := io.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("modrinth: failed to read response: %w", err)
}
v = &versionResponse{}
err = json.Unmarshal(data, v)
if err != nil {
return nil, ENoVersion
}
return
}
func versionSupportsLoader(
version *versionResponse,
loader types.Platform,
) bool {
for _, l := range version.Loaders {
if types.Platform(l).Satisfy(loader) {
return true
}
}
return false
}
func latestVersion(slug types.ProjectName) (
v *versionResponse,
err error,
) {
versions, err := listVersions(slug)
if err != nil {
return nil, err
}
v, fellBack := selectLatestVersionCandidate(versions, types.PlatformNone)
if v == nil {
return nil, ENoVersion
}
if fellBack {
// No release version found; fall back to the latest pre-release (beta/alpha).
logger.Info("no release version found for " + slug.Title() + ", falling back to latest pre-release")
}
logger.Debug("latest version of " + slug.String() + ": " + v.VersionNumber)
return v, nil
}
func latestCompatibleVersion(slug types.ProjectName, platform types.Platform) (
v *versionResponse,
err error,
) {
versions, err := listVersions(slug)
if err != nil {
return nil, err
}
filterByLoader := platform != types.PlatformAny && platform != types.PlatformNone
if filterByLoader {
v, _ = selectLatestCompatibleVersionCandidate(versions, platform)
} else {
v, _ = selectLatestVersionCandidate(versions, platform)
}
if v == nil {
return nil, ENoVersion
}
if filterByLoader && latestReleaseVersion(versions, platform, true) == nil {
// No release version found; fall back to the latest pre-release (beta/alpha).
logger.Info("no compatible version found for " + slug.Title() + ", falling back to latest pre-release")
} else if !filterByLoader && latestReleaseVersion(versions, platform, false) == nil {
logger.Info("no compatible version found for " + slug.Title() + ", falling back to latest pre-release")
}
return v, nil
}
// Package routing contains source-to-provider bindings and source resolution
// policies.
//
// Responsibilities:
// - Resolve SourceAuto against Platform into ordered provider candidates.
// - Map explicit Source to exactly one provider when supported.
// - Apply operation-aware routing policy (search/info/fetch/dependencies).
// - Return typed selection errors for invalid/unsupported inputs.
//
// Non-responsibilities:
// - Do not call provider APIs.
// - Do not aggregate or merge upstream result payloads.
package routing
import (
"errors"
"fmt"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
"github.com/mclucy/lucy/upstream/curseforge"
"github.com/mclucy/lucy/upstream/githubsource"
"github.com/mclucy/lucy/upstream/hangar"
"github.com/mclucy/lucy/upstream/mcdr"
"github.com/mclucy/lucy/upstream/modrinth"
"github.com/mclucy/lucy/upstream/spiget"
)
var (
ErrUnknownSource = errors.New("unknown source")
ErrUnsupportedSource = errors.New("unsupported source")
ErrInvalidPlatform = errors.New("cannot find sources for platform")
)
// providerBySource binds semantic Source values to executable Provider
// implementations.
//
// Source and Provider are intentionally not synonyms:
// - Some Source values are policy/sentinel markers (SourceAuto/SourceUnknown).
// - A Source can resolve to one provider, many providers, or none.
var providerBySource = map[types.Source]upstream.Provider{
types.SourceModrinth: modrinth.Provider,
types.SourceGitHub: githubsource.Provider,
types.SourceMCDR: mcdr.Provider,
types.SourceHangar: hangar.Provider,
types.SourceSpiget: spiget.Provider,
}
func listModProviders() []upstream.Provider {
providers, _ := providersFromSources(modProviderSources())
return providers
}
// ListAutoProviders returns the default ordered provider list used when
// source=auto and platform=all.
func ListAutoProviders() []upstream.Provider {
providers := listModProviders()
providers, _ = providersFromSources(append(modProviderSources(), types.SourceMCDR))
return providers
}
func GetProvider(src types.Source) (upstream.Provider, bool, error) {
if src == types.SourceCurseForge {
if err := curseforge.AvailabilityError(); err != nil {
return nil, false, err
}
return curseforge.Provider, true, nil
}
p, ok := providerBySource[src]
return p, ok, nil
}
// ResolveProviders resolves ordered provider candidates for a given operation,
// platform, and user-specified source.
func ResolveProviders(
platform types.Platform,
src types.Source,
) ([]upstream.Provider, error) {
if src == types.SourceUnknown {
return nil, ErrUnknownSource
}
if src != types.SourceAuto {
return resolveExplicitSource(src)
}
sources, err := providerSourcesForPlatform(platform)
if err != nil {
return nil, fmt.Errorf("%w: %s", err, platform)
}
return providersFromSources(sources)
}
// ResolveSearchProviders resolves providers for search operations. When a
// specific platform filter is active, routing validates explicit source
// selection and uses source capability data as the authority for automatic
// selection.
func ResolveSearchProviders(
platform types.Platform,
src types.Source,
) ([]upstream.Provider, error) {
if src == types.SourceUnknown {
return nil, ErrUnknownSource
}
if src != types.SourceAuto {
if err := validateSearchSourcePlatform(src, platform); err != nil {
return nil, err
}
return resolveExplicitSource(src)
}
if !platform.IsSearchPlatform() {
return ResolveProviders(platform, src)
}
sources := providerSourcesForSearchPlatform(platform)
if len(sources) == 0 {
return nil, fmt.Errorf("%w: %s", ErrInvalidPlatform, platform)
}
return providersFromSources(sources)
}
func ResolveProvidersFromTopology(
topology *types.RuntimeTopology,
src types.Source,
) ([]upstream.Provider, error) {
if src == types.SourceUnknown {
return nil, ErrUnknownSource
}
if src != types.SourceAuto {
return resolveExplicitSource(src)
}
if topology == nil || !topology.Resolved() {
return nil, fmt.Errorf("routing: topology unresolved, cannot resolve providers")
}
selection := providerSourcesFromTopology(topology)
if len(selection.sources) > 0 {
return providersFromSources(selection.sources)
}
if selection.fallback {
return ListAutoProviders(), nil
}
return []upstream.Provider{}, nil
}
func resolveExplicitSource(src types.Source) ([]upstream.Provider, error) {
provider, ok, err := GetProvider(src)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("%w: %s", ErrUnsupportedSource, src)
}
return []upstream.Provider{provider}, nil
}
func validateSearchSourcePlatform(src types.Source, platform types.Platform) error {
if !platform.IsSearchPlatform() {
return nil
}
support, ok := PlatformSupportedBy(src, platform)
if ok && support.Supported {
return nil
}
return fmt.Errorf("source %s does not support platform %s", src, platform)
}
func providersFromSources(sources []types.Source) ([]upstream.Provider, error) {
providers := make([]upstream.Provider, 0, len(sources))
for _, source := range sources {
provider, ok, err := GetProvider(source)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("%w: %s", ErrUnsupportedSource, source)
}
providers = append(providers, provider)
}
return providers, nil
}
func curseforgeAvailable() bool {
return curseforge.Enabled()
}
package routing
import "github.com/mclucy/lucy/types"
// SearchPlatformSupport describes how a source can participate in search for a
// given platform.
type SearchPlatformSupport struct {
// Supported reports whether the source can serve this platform at all.
Supported bool
// UpstreamFilterable reports whether the source can apply the platform filter
// upstream instead of requiring post-filtering.
UpstreamFilterable bool
}
// SourceSearchCapability describes static search capabilities for a source.
//
// This is a struct instead of an interface so additional capability dimensions
// can be added later without breaking callers.
type SourceSearchCapability struct {
Platforms map[types.Platform]SearchPlatformSupport
}
var unsupportedSearchPlatform = SearchPlatformSupport{}
var searchCapabilityBySource = map[types.Source]SourceSearchCapability{
types.SourceModrinth: {
Platforms: map[types.Platform]SearchPlatformSupport{
types.PlatformFabric: {Supported: true, UpstreamFilterable: true},
types.PlatformForge: {Supported: true, UpstreamFilterable: true},
types.PlatformNeoforge: {Supported: true, UpstreamFilterable: true},
types.PlatformBukkit: {Supported: true, UpstreamFilterable: true},
},
},
types.SourceCurseForge: {
Platforms: map[types.Platform]SearchPlatformSupport{
types.PlatformFabric: {Supported: true, UpstreamFilterable: true},
types.PlatformForge: {Supported: true, UpstreamFilterable: true},
types.PlatformNeoforge: {Supported: true, UpstreamFilterable: true},
types.PlatformBukkit: unsupportedSearchPlatform,
},
},
types.SourceHangar: {
Platforms: map[types.Platform]SearchPlatformSupport{
types.PlatformFabric: unsupportedSearchPlatform,
types.PlatformForge: unsupportedSearchPlatform,
types.PlatformNeoforge: unsupportedSearchPlatform,
types.PlatformBukkit: {Supported: true, UpstreamFilterable: true},
},
},
types.SourceSpiget: {
Platforms: map[types.Platform]SearchPlatformSupport{
types.PlatformFabric: unsupportedSearchPlatform,
types.PlatformForge: unsupportedSearchPlatform,
types.PlatformNeoforge: unsupportedSearchPlatform,
types.PlatformBukkit: {Supported: true, UpstreamFilterable: false},
},
},
types.SourceMCDR: {
Platforms: map[types.Platform]SearchPlatformSupport{
types.PlatformFabric: unsupportedSearchPlatform,
types.PlatformForge: unsupportedSearchPlatform,
types.PlatformNeoforge: unsupportedSearchPlatform,
types.PlatformBukkit: unsupportedSearchPlatform,
},
},
}
// SearchCapabilityFor returns static search capability metadata for a source.
func SearchCapabilityFor(src types.Source) (SourceSearchCapability, bool) {
capability, ok := searchCapabilityBySource[src]
return capability, ok
}
// PlatformSupportedBy returns the search support details for one source and
// platform combination.
func PlatformSupportedBy(src types.Source, platform types.Platform) (SearchPlatformSupport, bool) {
capability, ok := SearchCapabilityFor(src)
if !ok {
return SearchPlatformSupport{}, false
}
support, ok := capability.Platforms[platform]
return support, ok
}
package routing
import (
"errors"
"fmt"
"sync"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
var ErrNoProviderSucceeded = errors.New("no provider succeeded")
type ProviderError struct {
// Source identifies the semantic upstream label for user-facing diagnostics.
// The failed runtime executor is a Provider implementation.
Source types.Source
Err error
}
func (e ProviderError) Error() string {
return fmt.Sprintf("%s: %v", e.Source.String(), e.Err)
}
func (e ProviderError) Unwrap() error {
return e.Err
}
type InfoResult struct {
Information types.ProjectInformation
Fetch upstream.FetchResult
}
// SearchMany executes search on all providers in parallel.
//
// Default behavior is non-aggregated: each provider contributes one
// types.SearchResults item in the returned slice.
func SearchMany(
providers []upstream.Provider,
query types.ProjectName,
options types.SearchOptions,
) ([]types.SearchResults, []ProviderError) {
if len(providers) == 0 {
return nil, nil
}
type slot struct {
res types.SearchResults
err ProviderError
ok bool
failed bool
}
slots := make([]slot, len(providers))
var wg sync.WaitGroup
for i, provider := range providers {
wg.Add(1)
go func(index int, provider upstream.Provider) {
defer wg.Done()
res, err := upstream.Search(provider, query, options)
if err != nil {
slots[index] = slot{
failed: true,
err: ProviderError{
Source: provider.Source(),
Err: err,
},
}
return
}
slots[index] = slot{ok: true, res: res}
}(i, provider)
}
wg.Wait()
results := make([]types.SearchResults, 0, len(providers))
providerErrors := make([]ProviderError, 0)
for _, item := range slots {
if item.ok {
results = append(results, item.res)
}
if item.failed {
providerErrors = append(providerErrors, item.err)
}
}
return results, providerErrors
}
// FetchMany executes fetch on all providers in parallel and returns all
// successful results.
func FetchMany(
providers []upstream.Provider,
id types.PackageId,
) ([]upstream.FetchResult, []ProviderError) {
if len(providers) == 0 {
return nil, nil
}
type slot struct {
res upstream.FetchResult
err ProviderError
ok bool
failed bool
}
slots := make([]slot, len(providers))
var wg sync.WaitGroup
for i, provider := range providers {
wg.Add(1)
go func(index int, provider upstream.Provider) {
defer wg.Done()
remoteData, err := upstream.Fetch(provider, id)
if err != nil {
slots[index] = slot{
failed: true,
err: ProviderError{
Source: provider.Source(),
Err: err,
},
}
return
}
slots[index] = slot{ok: true, res: remoteData}
}(i, provider)
}
wg.Wait()
results := make([]upstream.FetchResult, 0, len(providers))
providerErrors := make([]ProviderError, 0)
for _, item := range slots {
if item.ok {
results = append(results, item.res)
}
if item.failed {
providerErrors = append(providerErrors, item.err)
}
}
return results, providerErrors
}
// FirstFetch executes fetch on all providers in parallel and returns the first
// successful result.
func FirstFetch(
providers []upstream.Provider,
id types.PackageId,
) (upstream.FetchResult, []ProviderError, error) {
// TODO: implement this function in a way that doesn't wait for all providers
// to finish if one has already succeeded
panic("not implemented")
}
// FirstInfo executes info+fetch on all providers in parallel and returns the
// first successful result.
func FirstInfo(
providers []upstream.Provider,
id types.PackageId,
) (InfoResult, []ProviderError, error) {
if len(providers) == 0 {
return InfoResult{}, nil, ErrNoProviderSucceeded
}
results := make(chan InfoResult, len(providers))
errorsChan := make(chan ProviderError, len(providers))
for _, provider := range providers {
go func(provider upstream.Provider) {
info, err := upstream.Information(provider, id.Name)
if err != nil {
errorsChan <- ProviderError{
Source: provider.Source(),
Err: fmt.Errorf("information failed: %w", err),
}
return
}
remoteData, err := upstream.Fetch(provider, id)
if err != nil {
errorsChan <- ProviderError{
Source: provider.Source(),
Err: fmt.Errorf("fetch failed: %w", err),
}
return
}
results <- InfoResult{Information: info, Fetch: remoteData}
}(provider)
}
providerErrors := make([]ProviderError, 0, len(providers))
pending := len(providers)
for pending > 0 {
select {
case result := <-results:
return result, providerErrors, nil
case providerErr := <-errorsChan:
providerErrors = append(providerErrors, providerErr)
pending--
}
}
return InfoResult{}, providerErrors, joinProviderErrors(providerErrors)
}
// DependenciesMany executes Dependencies on all providers in parallel and
// returns all successful results. An error is returned only when every provider
// fails; partial failures are collected in the returned []ProviderError slice.
func DependenciesMany(
providers []upstream.Provider,
id types.PackageId,
) ([]types.PackageDependencies, []ProviderError) {
if len(providers) == 0 {
return nil, nil
}
type slot struct {
res types.PackageDependencies
err ProviderError
ok bool
failed bool
}
slots := make([]slot, len(providers))
var wg sync.WaitGroup
for i, provider := range providers {
wg.Add(1)
go func(index int, provider upstream.Provider) {
defer wg.Done()
deps, err := upstream.Dependencies(provider, id)
if err != nil {
slots[index] = slot{
failed: true,
err: ProviderError{
Source: provider.Source(),
Err: err,
},
}
return
}
result := types.PackageDependencies{}
if deps != nil {
result = *deps
}
slots[index] = slot{ok: true, res: result}
}(i, provider)
}
wg.Wait()
results := make([]types.PackageDependencies, 0, len(providers))
providerErrors := make([]ProviderError, 0)
for _, item := range slots {
if item.ok {
results = append(results, item.res)
}
if item.failed {
providerErrors = append(providerErrors, item.err)
}
}
if len(results) == 0 && len(providerErrors) > 0 {
return nil, providerErrors
}
return results, providerErrors
}
func joinProviderErrors(providerErrors []ProviderError) error {
if len(providerErrors) == 0 {
return ErrNoProviderSucceeded
}
joined := make([]error, 0, len(providerErrors))
for _, providerErr := range providerErrors {
joined = append(joined, providerErr)
}
return errors.Join(joined...)
}
package routing
import "github.com/mclucy/lucy/types"
var searchProviderSourcesInPriorityOrder = []types.Source{
types.SourceModrinth,
types.SourceCurseForge,
types.SourceHangar,
types.SourceSpiget,
}
func autoProviderSources() []types.Source {
return append(modProviderSources(), types.SourceMCDR)
}
func modProviderSources() []types.Source {
sources := []types.Source{types.SourceModrinth}
if curseforgeAvailable() {
sources = append(sources, types.SourceCurseForge)
}
return sources
}
func providerSourcesForPlatform(platform types.Platform) ([]types.Source, error) {
switch platform {
case types.PlatformAny:
return autoProviderSources(), nil
case types.PlatformMCDR:
return []types.Source{types.SourceMCDR}, nil
case types.PlatformForge, types.PlatformFabric, types.PlatformNeoforge, types.PlatformBukkit:
return providerSourcesForSearchPlatform(platform), nil
default:
return nil, ErrInvalidPlatform
}
}
func providerSourcesForSearchPlatform(platform types.Platform) []types.Source {
sources := make([]types.Source, 0, len(searchProviderSourcesInPriorityOrder))
for _, source := range searchProviderSourcesInPriorityOrder {
if source == types.SourceCurseForge && !curseforgeAvailable() {
continue
}
support, ok := PlatformSupportedBy(source, platform)
if !ok || !support.Supported {
continue
}
sources = append(sources, source)
}
return sources
}
type topologyResolution struct {
sources []types.Source
fallback bool
empty bool
}
func providerSourcesFromTopology(topology *types.RuntimeTopology) topologyResolution {
selection := topologyResolution{}
seen := map[types.Source]struct{}{}
sawKnownCapability := false
sawProxyCapability := false
appendSource := func(source types.Source) {
if _, ok := seen[source]; ok {
return
}
seen[source] = struct{}{}
selection.sources = append(selection.sources, source)
}
for _, node := range topology.Nodes {
for _, capability := range node.Capabilities {
switch capability {
case types.CapabilityFabricMods,
types.CapabilityForgeMods,
types.CapabilityNeoforgeMods,
types.CapabilityBukkitPlugins:
sawKnownCapability = true
appendSource(types.SourceModrinth)
if capability != types.CapabilityBukkitPlugins && curseforgeAvailable() {
appendSource(types.SourceCurseForge)
}
if capability == types.CapabilityBukkitPlugins {
appendSource(types.SourceHangar)
appendSource(types.SourceSpiget)
}
case types.CapabilityMCDRPlugins:
sawKnownCapability = true
appendSource(types.SourceMCDR)
case types.CapabilityProxying:
sawKnownCapability = true
sawProxyCapability = true
}
}
}
if len(selection.sources) > 0 {
return selection
}
if sawProxyCapability {
selection.empty = true
return selection
}
if !sawKnownCapability {
selection.fallback = true
}
selection.empty = true
return selection
}
func providerSourcesByCapability(topology *types.RuntimeTopology) []types.Source {
sources := make([]types.Source, 0, 2)
seen := map[types.Source]struct{}{}
appendSource := func(source types.Source) {
if _, exists := seen[source]; exists {
return
}
seen[source] = struct{}{}
sources = append(sources, source)
}
if topology.HasCapability(types.CapabilityFabricMods) ||
topology.HasCapability(types.CapabilityForgeMods) ||
topology.HasCapability(types.CapabilityNeoforgeMods) {
appendSource(types.SourceModrinth)
if curseforgeAvailable() {
appendSource(types.SourceCurseForge)
}
}
if topology.HasCapability(types.CapabilityBukkitPlugins) {
appendSource(types.SourceModrinth)
appendSource(types.SourceHangar)
appendSource(types.SourceSpiget)
}
if topology.HasCapability(types.CapabilityMCDRPlugins) {
appendSource(types.SourceMCDR)
}
if topology.HasCapability(types.CapabilityProxying) {
appendSource(types.SourceModrinth)
}
return sources
}
package routing
import "github.com/mclucy/lucy/types"
type SearchAggregateOptions struct {
Enabled bool
}
// MaybeAggregateSearchResults is an optional post-processing utility. It is
// disabled by default and intentionally decoupled from SearchMany.
func MaybeAggregateSearchResults(
results []types.SearchResults,
options SearchAggregateOptions,
) []types.SearchResults {
if !options.Enabled || len(results) <= 1 {
return results
}
return []types.SearchResults{AggregateSearchResults(results)}
}
// AggregateSearchResults merges multi-provider search results into one result.
// Source metadata remains in the original non-aggregated form and should be
// preferred unless aggregation is explicitly required by callers.
func AggregateSearchResults(results []types.SearchResults) types.SearchResults {
aggregated := types.SearchResults{
Source: types.SourceAuto,
Projects: make([]types.ProjectName, 0),
}
for _, res := range results {
aggregated.Projects = append(aggregated.Projects, res.Projects...)
}
return aggregated
}
package routing
import (
"fmt"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
// ResolveProvidersByTopology resolves providers using runtime topology
// capabilities. Returns an error when topology is nil/unresolved.
// Explicit source selection always delegates to ResolveProviders.
func ResolveProvidersByTopology(
topology *types.RuntimeTopology,
platform types.Platform,
src types.Source,
) ([]upstream.Provider, error) {
if topology == nil || !topology.Resolved() {
return nil, fmt.Errorf("routing: topology unresolved, cannot resolve providers")
}
if src != types.SourceAuto {
return ResolveProviders(platform, src)
}
if platform == types.PlatformAny {
return ListAutoProviders(), nil
}
sources := providerSourcesByCapability(topology)
if len(sources) == 0 {
return nil, fmt.Errorf("%w: no providers resolved from topology", ErrInvalidPlatform)
}
return providersFromSources(sources)
}
package slugresolve
import (
"crypto/sha256"
"encoding/hex"
"io"
"os"
"github.com/mclucy/lucy/slugmap"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream/curseforge"
"github.com/mclucy/lucy/upstream/modrinth"
)
func ResolveSlug(
src types.Source,
localId string,
filePath string,
metadataURLs []string,
) string {
var fileHash string
if filePath != "" {
fileHash = sha256File(filePath)
}
if fileHash != "" {
if slug, ok := slugmap.Default().Get(src, localId, fileHash); ok {
return slug
}
}
hintSlug := ""
if slug, ok := slugmap.Default().GetLoose(src, localId); ok {
hintSlug = slug
}
if hintSlug == "" {
for _, u := range metadataURLs {
urlSrc, s, ok := ExtractFromURL(u)
if ok && urlSrc == src && s != "" {
hintSlug = s
break
}
}
}
if filePath != "" {
var slug string
var err error
switch src {
case types.SourceModrinth:
slug, err = modrinth.SlugFromFilePathWithHint(filePath, hintSlug)
case types.SourceCurseForge:
slug, err = curseforge.SlugFromFilePathWithHint(filePath, hintSlug)
}
if err == nil && slug != "" {
if fileHash != "" {
slugmap.Default().Set(src, localId, fileHash, slug, "hash")
}
return slug
}
}
return localId
}
func sha256File(path string) string {
f, err := os.Open(path)
if err != nil {
return ""
}
defer f.Close()
h := sha256.New()
if _, err := io.Copy(h, f); err != nil {
return ""
}
return hex.EncodeToString(h.Sum(nil))
}
package slugresolve
import (
"net/url"
"strings"
"github.com/mclucy/lucy/types"
)
// ExtractFromURL parses a mod homepage URL and returns the upstream source
// and canonical slug if the URL is a recognised Modrinth or CurseForge
// project page.
//
// Recognised patterns:
//
// https://modrinth.com/mod/<slug>
// https://modrinth.com/plugin/<slug>
// https://modrinth.com/datapack/<slug>
// https://www.curseforge.com/minecraft/mc-mods/<slug>
// https://curseforge.com/minecraft/mc-mods/<slug>
func ExtractFromURL(rawURL string) (src types.Source, slug string, ok bool) {
if rawURL == "" {
return types.SourceAuto, "", false
}
u, err := url.Parse(rawURL)
if err != nil {
return types.SourceAuto, "", false
}
host := strings.TrimPrefix(u.Host, "www.")
parts := strings.Split(strings.Trim(u.Path, "/"), "/")
switch host {
case "modrinth.com":
// /mod/<slug>, /plugin/<slug>, /datapack/<slug>
if len(parts) >= 2 {
return types.SourceModrinth, parts[1], true
}
case "curseforge.com":
// /minecraft/mc-mods/<slug>
if len(parts) >= 3 && parts[0] == "minecraft" && parts[1] == "mc-mods" {
return types.SourceCurseForge, parts[2], true
}
}
return types.SourceAuto, "", false
}
package spiget
import (
"errors"
"fmt"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/types"
"github.com/mclucy/lucy/upstream"
)
type provider struct{}
var Provider provider
func (provider) Source() types.Source {
return types.SourceSpiget
}
func (provider) Search(
query string,
options types.SearchOptions,
) (res upstream.RawSearchResults, err error) {
if options.FilterPlatform == types.PlatformBukkit {
logger.Debug("spiget: platform filter is not supported upstream; search will run without a platform query parameter")
}
resp, err := searchResources(query, options)
if err != nil {
return nil, err
}
return resp, nil
}
func (p provider) Fetch(id types.PackageId) (
remote upstream.RawPackageRemote,
err error,
) {
resource, err := resolveResourceByProjectName(id.Name)
if err != nil {
return nil, err
}
resolved, err := resolveVersion(resource, id.Version)
if err != nil {
return nil, err
}
return resolved, nil
}
func (p provider) Information(name types.ProjectName) (
info upstream.RawProjectInformation,
err error,
) {
resource, err := resolveResourceByProjectName(name)
if err != nil {
return nil, err
}
return resource, nil
}
func (p provider) Support(name types.ProjectName) (
supports upstream.RawProjectSupport,
err error,
) {
resource, err := resolveResourceByProjectName(name)
if err != nil {
return nil, err
}
return resource, nil
}
func (p provider) Dependencies(id types.PackageId) (
deps upstream.RawPackageDependencies,
err error,
) {
return nil, ErrNotImplemented
}
func (p provider) ParseAmbiguousId(id types.PackageId) (
parsed types.PackageId,
err error,
) {
parsed = id
switch id.Version {
case "", types.VersionAny, types.VersionNone, types.VersionLatest, types.VersionCompatible:
default:
return id, nil
}
resource, err := resolveResourceByProjectName(id.Name)
if err != nil {
return id, err
}
resolved, err := resolveVersion(resource, id.Version)
if err != nil {
return id, err
}
parsed.Version = resolved.LucyVersion()
logger.Debug("parsed from " + id.StringFull() + " to " + parsed.StringFull())
return parsed, nil
}
var (
ErrNotImplemented = errors.New("spiget: not implemented")
ErrNoProject = errors.New("spiget: project not found")
ErrNoVersion = errors.New("spiget: version not found")
)
func unexpectedStatusError(url string, statusCode int) error {
return fmt.Errorf("spiget: unexpected status %d for %s", statusCode, url)
}
package spiget
import (
"encoding/base64"
"fmt"
"net/url"
"strconv"
"strings"
"unicode"
"github.com/mclucy/lucy/syntax"
"github.com/mclucy/lucy/types"
)
const (
spigetAPIBaseURL = "https://api.spiget.org/v2"
spigotWebsiteBaseURL = "https://www.spigotmc.org/"
)
type decodedHTML struct {
Value string
Valid bool
}
// resolvedVersion keeps enough provider-local identity to resolve exact Spiget
// downloads while still exposing Lucy-friendly human version names.
type resolvedVersion struct {
ResourceID int64
VersionID int64
VersionName string
ProjectName string
FileType string
External bool
ExternalURL string
UUID string
}
func (s searchResponse) ToSearchResults() types.SearchResults {
results := types.SearchResults{
Source: types.SourceSpiget,
Projects: make([]types.ProjectName, 0, len(s)),
}
for _, resource := range s {
if resource.Name == "" {
continue
}
results.Projects = append(results.Projects, normalizedProjectName(resource.Name))
}
return results
}
func (r resourceResponse) ToProjectInformation() types.ProjectInformation {
description := combineHTMLSections(
decodeBase64HTML(r.Description),
decodeBase64HTML(r.Documentation),
)
info := types.ProjectInformation{
Title: r.Name,
Brief: r.Tag,
Description: description,
DescriptionIsMarkdown: false,
Urls: make([]types.Url, 0, len(r.Links)+2),
}
appendLinkURLs(&info, r.Links)
if r.SourceCodeLink != "" {
info.Urls = append(info.Urls, types.Url{Name: "Source", Type: types.UrlSource, Url: r.SourceCodeLink})
}
if r.DonationLink != "" {
info.Urls = append(info.Urls, types.Url{Name: "Donate", Type: types.UrlSponsor, Url: r.DonationLink})
}
return info
}
func (r resourceResponse) ToProjectSupport() types.PlatformSupport {
support := types.PlatformSupport{
MinecraftVersions: make([]types.RawVersion, 0, len(r.TestedVersions)),
Platforms: make([]types.Platform, 0),
Authentic: false,
}
for _, version := range r.TestedVersions {
if version == "" {
continue
}
support.MinecraftVersions = append(support.MinecraftVersions, types.RawVersion(version))
}
return support
}
// NewResolvedVersion preserves both Lucy-facing human versions and Spiget's
// numeric resource/version identifiers for later exact download resolution.
func NewResolvedVersion(resource resourceResponse, version versionResponse) resolvedVersion {
return resolvedVersion{
ResourceID: resource.ID,
VersionID: version.ID,
VersionName: version.Name,
ProjectName: normalizedProjectName(resource.Name).String(),
FileType: resource.File.Type,
External: resource.External,
ExternalURL: resource.File.ExternalURL,
UUID: version.UUID,
}
}
func (r resolvedVersion) LucyVersion() types.RawVersion {
if r.VersionName != "" {
return types.RawVersion(r.VersionName)
}
if r.VersionID != 0 {
return types.RawVersion(strconv.FormatInt(r.VersionID, 10))
}
return types.VersionUnknown
}
func (r resolvedVersion) Matches(version types.RawVersion) bool {
requested := strings.TrimSpace(version.String())
if requested == "" || requested == types.VersionAny.String() {
return false
}
if requested == string(r.LucyVersion()) {
return true
}
return r.VersionID != 0 && requested == strconv.FormatInt(r.VersionID, 10)
}
func (r resolvedVersion) ToPackageRemote() types.PackageRemote {
return types.PackageRemote{
Source: types.SourceSpiget,
FileUrl: r.downloadURL(),
Filename: r.filename(),
}
}
func (r resolvedVersion) downloadURL() string {
if r.External && r.ExternalURL != "" {
return r.ExternalURL
}
if r.ResourceID == 0 || r.VersionID == 0 {
return ""
}
return fmt.Sprintf(
"%s/resources/%d/versions/%d/download",
spigetAPIBaseURL,
r.ResourceID,
r.VersionID,
)
}
func (r resolvedVersion) filename() string {
base := strings.TrimSpace(r.ProjectName)
if base == "" {
base = strconv.FormatInt(r.ResourceID, 10)
}
version := strings.TrimSpace(r.VersionName)
if version == "" && r.VersionID != 0 {
version = strconv.FormatInt(r.VersionID, 10)
}
if version != "" {
base += "-" + version
}
if ext := normalizedFileExtension(r.FileType); ext != "" {
return base + ext
}
return base
}
func normalizedFileExtension(fileType string) string {
trimmed := strings.TrimSpace(fileType)
if trimmed == "" || strings.EqualFold(trimmed, "external") {
return ""
}
if strings.HasPrefix(trimmed, ".") {
return trimmed
}
return "." + trimmed
}
func decodeBase64HTML(encoded string) decodedHTML {
encoded = strings.TrimSpace(encoded)
if encoded == "" {
return decodedHTML{}
}
decoded, err := base64.StdEncoding.DecodeString(encoded)
if err != nil {
return decodedHTML{}
}
return decodedHTML{Value: string(decoded), Valid: true}
}
func combineHTMLSections(description, documentation decodedHTML) string {
switch {
case description.Valid && documentation.Valid:
return description.Value + "\n\n<hr />\n\n<h2>Documentation</h2>\n" + documentation.Value
case description.Valid:
return description.Value
case documentation.Valid:
return documentation.Value
default:
return ""
}
}
func appendLinkURLs(info *types.ProjectInformation, links map[string]string) {
for key, rawValue := range links {
value := normalizeSpigetURL(rawValue)
if value == "" {
continue
}
name, kind := classifySpigetLink(key)
info.Urls = append(info.Urls, types.Url{Name: name, Type: kind, Url: value})
}
}
func normalizeSpigetURL(raw string) string {
raw = strings.TrimSpace(raw)
if raw == "" {
return ""
}
if strings.HasPrefix(raw, "http://") || strings.HasPrefix(raw, "https://") {
return raw
}
if strings.HasPrefix(raw, "/") {
return spigotWebsiteBaseURL[:len(spigotWebsiteBaseURL)-1] + raw
}
if strings.HasPrefix(raw, "threads/") || strings.HasPrefix(raw, "resources/") || strings.HasPrefix(raw, "members/") {
return spigotWebsiteBaseURL + raw
}
return raw
}
func classifySpigetLink(key string) (string, types.UrlType) {
switch key {
case "additionalInformation":
return "Additional information", types.UrlHome
case "alternativeSupport":
return "Support", types.UrlForum
case "discussion":
return "Discussion", types.UrlForum
}
decoded := decodeBase64HTML(key)
if decoded.Valid {
if _, err := url.ParseRequestURI(decoded.Value); err == nil {
return "Link", types.UrlMisc
}
return prettifyLinkName(decoded.Value), types.UrlMisc
}
return prettifyLinkName(key), types.UrlMisc
}
func prettifyLinkName(name string) string {
name = strings.TrimSpace(name)
if name == "" {
return "Link"
}
replacer := strings.NewReplacer("-", " ", "_", " ")
name = replacer.Replace(name)
return strings.ToUpper(name[:1]) + name[1:]
}
func normalizedProjectName(name string) types.ProjectName {
name = syntax.ToProjectName(name).String()
var b strings.Builder
b.Grow(len(name))
lastHyphen := false
for _, r := range name {
switch {
case unicode.IsLetter(r) || unicode.IsDigit(r):
b.WriteRune(unicode.ToLower(r))
lastHyphen = false
case !lastHyphen:
b.WriteByte('-')
lastHyphen = true
}
}
return types.ProjectName(strings.Trim(b.String(), "-"))
}
package spiget
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
"github.com/mclucy/lucy/types"
)
func requestJSON(requestURL string, out any, notFound error) error {
logger.Debug("spiget api: GET " + requestURL)
resp, err := http.Get(requestURL)
if err != nil {
return fmt.Errorf("spiget: request failed: %w", err)
}
defer tools.CloseReader(resp.Body, logger.Warn)
if resp.StatusCode != http.StatusOK {
if resp.StatusCode == http.StatusNotFound && notFound != nil {
return notFound
}
return unexpectedStatusError(requestURL, resp.StatusCode)
}
data, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("spiget: failed to read response: %w", err)
}
if err := json.Unmarshal(data, out); err != nil {
return fmt.Errorf("spiget: failed to decode response: %w", err)
}
return nil
}
func searchResources(query string, options types.SearchOptions) (searchResponse, error) {
u := searchResourcesURL(query, options)
resp := searchResponse{}
if err := requestJSON(u, &resp, nil); err != nil {
return nil, err
}
return resp, nil
}
func getResource(id int64) (*resourceResponse, error) {
resp := &resourceResponse{}
if err := requestJSON(resourceURL(id), resp, ErrNoProject); err != nil {
return nil, err
}
return resp, nil
}
func getLatestVersion(resourceID int64) (*versionResponse, error) {
resp := &versionResponse{}
if err := requestJSON(latestVersionURL(resourceID), resp, ErrNoVersion); err != nil {
return nil, err
}
return resp, nil
}
func listVersions(resourceID int64) ([]versionResponse, error) {
resp := []versionResponse{}
if err := requestJSON(versionsURL(resourceID), &resp, ErrNoVersion); err != nil {
return nil, err
}
return resp, nil
}
func searchResourcesURL(query string, options types.SearchOptions) string {
values := url.Values{}
values.Set("size", "20")
if sort := spigetSearchSort(options.SortBy); sort != "" {
values.Set("sort", sort)
}
return spigetAPIBaseURL + "/search/resources/" + url.PathEscape(query) + "?" + values.Encode()
}
func resourceURL(id int64) string {
return spigetAPIBaseURL + "/resources/" + strconv.FormatInt(id, 10)
}
func latestVersionURL(resourceID int64) string {
return resourceURL(resourceID) + "/versions/latest"
}
func versionsURL(resourceID int64) string {
values := url.Values{}
values.Set("size", "1000")
values.Set("sort", "-releaseDate")
return resourceURL(resourceID) + "/versions?" + values.Encode()
}
func spigetSearchSort(sort types.SearchSort) string {
switch sort {
case types.SearchSortDownloads:
return "-downloads"
case types.SearchSortNewest:
return "-updateDate"
case types.SearchSortName:
return "+name"
default:
return ""
}
}
func parseNumericResourceID(name types.ProjectName) (int64, bool) {
trimmed := strings.TrimSpace(name.String())
if trimmed == "" {
return 0, false
}
id, err := strconv.ParseInt(trimmed, 10, 64)
if err != nil || id <= 0 {
return 0, false
}
return id, true
}
package spiget
import (
"strings"
"github.com/mclucy/lucy/types"
)
func resolveResourceByProjectName(name types.ProjectName) (*resourceResponse, error) {
if id, ok := parseNumericResourceID(name); ok {
return getResource(id)
}
results, err := searchResources(name.String(), types.SearchOptions{SortBy: types.SearchSortRelevance})
if err != nil {
return nil, err
}
for _, candidate := range results {
if normalizedProjectName(candidate.Name) == name || strings.EqualFold(candidate.Name, name.String()) {
return getResource(candidate.ID)
}
}
if len(results) == 1 {
return getResource(results[0].ID)
}
return nil, ErrNoProject
}
func resolveVersion(resource *resourceResponse, requested types.RawVersion) (resolvedVersion, error) {
switch requested {
case "", types.VersionAny, types.VersionNone, types.VersionLatest, types.VersionCompatible:
latest, err := getLatestVersion(resource.ID)
if err != nil {
return resolvedVersion{}, err
}
return NewResolvedVersion(*resource, *latest), nil
}
versions, err := listVersions(resource.ID)
if err != nil {
return resolvedVersion{}, err
}
for _, version := range versions {
resolved := NewResolvedVersion(*resource, version)
if resolved.Matches(requested) {
return resolved, nil
}
}
return resolvedVersion{}, ErrNoVersion
}
// Package upstream defines the core upstream abstraction layer.
//
// Architecture overview:
// - types.Source is a stable user-facing identifier (CLI/config/storage).
// - Provider is a behavior interface that executes upstream operations.
// - Source selection policy lives outside this package in a dedicated resolver
// package under upstream (currently upstream/routing).
//
// Dependency inversion:
// - This package defines interfaces and normalized conversion contracts.
// - Concrete providers (modrinth, mcdr, curseforge, githubsource) implement
// Provider and depend on these contracts, not the other way around.
// - Callers pass Provider into Fetch/Search/Information. Core logic depends on
// abstractions rather than concrete upstream implementations.
//
// Boundary:
// - upstream package executes provider capabilities and normalizes outputs.
// - Source selection, source-auto policy, and multi-provider execution
// strategies are handled by routing logic in subpackages.
package upstream
import (
"fmt"
"github.com/mclucy/lucy/types"
)
// IoC via dependency injection
func Fetch(
provider Provider,
id types.PackageId,
) (result FetchResult, err error) {
resolvedID, err := provider.ParseAmbiguousId(id)
if err != nil {
return FetchResult{}, err
}
raw, err := provider.Fetch(resolvedID)
if err != nil {
return FetchResult{}, err
}
result.ResolvedID = resolvedID
result.Remote = raw.ToPackageRemote()
return result, nil
}
func Dependencies(
provider Provider,
id types.PackageId,
) (deps *types.PackageDependencies, err error) {
raw, err := provider.Dependencies(id)
if err != nil {
return nil, err
}
result := raw.ToPackageDependencies()
return &result, nil
}
func PlatformSupport(src types.Source, name types.ProjectName) (
supports *types.PlatformSupport,
err error,
) {
// TODO: Implement
panic("not implemented")
}
func Information(
provider Provider,
name types.ProjectName,
) (info types.ProjectInformation, err error) {
raw, err := provider.Information(name)
if err != nil {
return types.ProjectInformation{}, err
}
info = raw.ToProjectInformation()
return info, nil
}
func Search(
provider Provider,
query types.ProjectName,
option types.SearchOptions,
) (res types.SearchResults, err error) {
raw, err := provider.Search(string(query), option)
if err != nil {
return res, err
}
res = raw.ToSearchResults()
if len(res.Projects) == 0 {
return res, fmt.Errorf("no projects found for \"%s\"", query)
}
return res, nil
}
// InferVersion replaces inferable version constants with their inferred versions
// through sources. You should call this function before parsing the version to
// ComparableVersion.
//
// TODO: Remove, infer version should not be exposed. All inference will be done in providers.
func InferVersion(
provider Provider,
id types.PackageId,
) (infer types.PackageId) {
return id
}
package upstream
import (
"strings"
"github.com/mclucy/lucy/tools"
)
// LooksLikeMarkdown returns true when rendering the text as markdown produces a
// meaningfully different result than showing the original plain text.
func LooksLikeMarkdown(text string) bool {
trimmed := strings.TrimSpace(text)
if trimmed == "" {
return false
}
rendered := strings.TrimSpace(tools.MarkdownToAnsi(trimmed, 80))
if rendered == "" || rendered == trimmed {
return false
}
plain := normalizeMarkdownDetectionText(trimmed)
markdown := normalizeMarkdownDetectionText(rendered)
if markdown == "" || markdown == plain {
return false
}
return true
}
func normalizeMarkdownDetectionText(text string) string {
fields := strings.Fields(text)
return strings.Join(fields, " ")
}
package util
import (
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"fmt"
"hash"
"io"
"net/http"
"os"
"path"
"time"
"github.com/mclucy/lucy/cache"
"github.com/mclucy/lucy/logger"
"github.com/mclucy/lucy/tools"
)
type DownloadOptions struct {
Kind cache.EntryKind
ExpectedHash string
HashAlgorithm cache.HashAlgorithm
Filename string
WrapReader func(io.Reader, int64) io.Reader
OnCacheHit func()
OnResolvedFilename func(string)
TTL time.Duration
FileMode os.FileMode
}
type BytesRequestOptions struct {
Kind cache.EntryKind
ExpectedHash string
HashAlgorithm cache.HashAlgorithm
TTL time.Duration
MaxBytes int64
}
type DownloadResult struct {
File *os.File
CacheHit bool
Verified bool
}
// CachedDownload downloads a file from url into dir, using the cache for
// deduplication. On cache hit the file is copied from the store and
// OnCacheHit (if set) is called. On miss the response body is streamed
// through an optional WrapReader (for progress tracking) and simultaneously
// hashed for both content-addressing and integrity verification.
func CachedDownload(url, dir string, opts DownloadOptions) (
*DownloadResult,
error,
) {
if opts.FileMode == 0 {
opts.FileMode = 0o640
}
hit, cachedFile, err := cache.Network().Get(url)
if err != nil {
logger.Warn(
fmt.Errorf(
"cache lookup failed, proceeding with download: %w",
err,
),
)
}
if hit && cachedFile != nil {
defer cachedFile.Close()
resolvedName := path.Base(cachedFile.Name())
if opts.OnResolvedFilename != nil {
opts.OnResolvedFilename(resolvedName)
}
if opts.OnCacheHit != nil {
opts.OnCacheHit()
}
destPath := path.Join(dir, resolvedName)
destFile, err := tools.CopyFile(cachedFile, destPath, opts.FileMode)
if err != nil {
return nil, fmt.Errorf(
"failed to copy cached file to destination: %w",
err,
)
}
return &DownloadResult{
File: destFile,
CacheHit: true,
Verified: false,
}, nil
}
return downloadAndCache(url, dir, opts)
}
// CachedGetBytes fetches bytes from url, using the cache for deduplication.
// On cache hit the bytes are returned directly. On miss the response body is
// read into memory with a size limit, hashed for content-addressing and
// integrity verification, then cached.
func CachedGetBytes(url string, opts BytesRequestOptions) ([]byte, error) {
hit, data, err := cache.Network().GetBytes(url)
if err != nil {
logger.Warn(
fmt.Errorf(
"cache lookup failed, proceeding with fetch: %w",
err,
),
)
}
if hit && data != nil {
return data, nil
}
maxBytes := opts.MaxBytes
if maxBytes == 0 {
maxBytes = 50 * 1024 * 1024
}
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("fetch failed: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return nil, fmt.Errorf("fetch failed: status %d", resp.StatusCode)
}
contentHasher := sha256.New()
limitedReader := io.LimitReader(resp.Body, maxBytes)
writers := []io.Writer{contentHasher}
var integrityHasher hash.Hash
if opts.ExpectedHash != "" && opts.HashAlgorithm != cache.HashNone {
integrityHasher = newHasher(opts.HashAlgorithm)
if integrityHasher != nil {
writers = append(writers, integrityHasher)
}
}
w := io.MultiWriter(writers...)
bytes, err := io.ReadAll(io.TeeReader(limitedReader, w))
if err != nil {
return nil, fmt.Errorf("read failed: %w", err)
}
if int64(len(bytes)) >= maxBytes {
return nil, fmt.Errorf("response too large: exceeded %d bytes", maxBytes)
}
contentHash := hex.EncodeToString(contentHasher.Sum(nil))
integrity, _, err := verifyIntegrity(integrityHasher, opts.HashAlgorithm, opts.ExpectedHash, url)
if err != nil {
return nil, err
}
ttl := resolveTTL(opts.Kind, opts.TTL)
if err := cache.Network().AddEntry(
bytes, contentHash, url, opts.Kind, integrity, ttl,
); err != nil {
logger.Warn(fmt.Errorf("failed to cache bytes: %w", err))
}
return bytes, nil
}
func downloadAndCache(url, dir string, opts DownloadOptions) (
*DownloadResult,
error,
) {
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("download failed: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return nil, fmt.Errorf("download failed: status %d", resp.StatusCode)
}
filename := opts.Filename
if filename == "" {
filename = speculateFilename(resp)
}
if opts.OnResolvedFilename != nil && filename != "" {
opts.OnResolvedFilename(filename)
}
tmpFile, err := os.CreateTemp("", "lucy-download-*")
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %w", err)
}
tmpPath := tmpFile.Name()
defer func() {
tmpFile.Close()
os.Remove(tmpPath)
}()
contentHasher := sha256.New()
writers := []io.Writer{tmpFile, contentHasher}
var integrityHasher hash.Hash
if opts.ExpectedHash != "" && opts.HashAlgorithm != cache.HashNone {
integrityHasher = newHasher(opts.HashAlgorithm)
if integrityHasher != nil {
writers = append(writers, integrityHasher)
}
}
w := io.MultiWriter(writers...)
var reader io.Reader = resp.Body
if opts.WrapReader != nil {
reader = opts.WrapReader(reader, resp.ContentLength)
}
size, err := io.Copy(w, reader)
if err != nil {
return nil, fmt.Errorf("download stream failed: %w", err)
}
contentHash := hex.EncodeToString(contentHasher.Sum(nil))
integrity, verified, err := verifyIntegrity(integrityHasher, opts.HashAlgorithm, opts.ExpectedHash, url)
if err != nil {
return nil, err
}
if filename == "" {
filename = contentHash
}
destPath := path.Join(dir, filename)
tmpFile.Close()
src, err := os.Open(tmpPath)
if err != nil {
return nil, fmt.Errorf("failed to reopen temp file: %w", err)
}
defer src.Close()
destFile, err := tools.CopyFile(src, destPath, opts.FileMode)
if err != nil {
return nil, fmt.Errorf("failed to write file to destination: %w", err)
}
ttl := resolveTTL(opts.Kind, opts.TTL)
if err := cache.Network().IngestEntry(
tmpPath, filename, url, size, contentHash,
opts.Kind, integrity, ttl,
); err != nil {
logger.Warn(fmt.Errorf("failed to cache downloaded file: %w", err))
}
return &DownloadResult{
File: destFile,
CacheHit: false,
Verified: verified,
}, nil
}
func resolveTTL(kind cache.EntryKind, customTTL time.Duration) time.Duration {
ttl := cache.DefaultCacheConfig().DownloadKeepFor
if kind == cache.KindMetadata {
ttl = cache.DefaultCacheConfig().IndexRefreshAfter
}
if customTTL > 0 {
ttl = customTTL
}
return ttl
}
func verifyIntegrity(hasher hash.Hash, algorithm cache.HashAlgorithm, expectedHash string, url string) (cache.Integrity, bool, error) {
integrity := cache.Integrity{State: cache.IntegrityUnverified}
verified := false
if hasher != nil && expectedHash != "" {
actualHex := hex.EncodeToString(hasher.Sum(nil))
if actualHex != expectedHash {
return integrity, false, fmt.Errorf(
"integrity verification failed (%s): expected %s, got %s",
algorithm, expectedHash, actualHex,
)
}
integrity = cache.Integrity{
Algorithm: algorithm,
Expected: expectedHash,
Actual: actualHex,
State: cache.IntegrityVerified,
}
verified = true
logger.Debug(
fmt.Sprintf(
"integrity verified (%s): %s",
algorithm,
url,
),
)
}
return integrity, verified, nil
}
func newHasher(algo cache.HashAlgorithm) hash.Hash {
switch algo {
case cache.HashSHA1:
return sha1.New()
case cache.HashSHA256:
return sha256.New()
case cache.HashSHA512:
return sha512.New()
default:
return nil
}
}
// Package util is a general package for network and file system operations.
package util
import (
"mime"
"net/http"
"net/url"
"strings"
)
const (
ProgramPath = ".lucy"
// ConfigFile is Lucy's project-local policy/defaults file. The older
// .lucy/config.json path is deprecated in favor of TOML.
ConfigFile = ProgramPath + "/config.toml"
)
func speculateFilename(resp *http.Response) string {
if filename, ok := getFilenameFromHeader(resp); ok {
return filename
}
filename := getFilenameFromURL(resp.Request.URL.String())
return filename
}
func getFilenameFromHeader(resp *http.Response) (string, bool) {
contentDisposition := resp.Header.Get("Content-Disposition")
if contentDisposition == "" {
return "", false
}
_, params, err := mime.ParseMediaType(contentDisposition)
if err != nil {
return "", false
}
filename, ok := params["filename"]
return filename, ok
}
func getFilenameFromURL(urlString string) string {
u, err := url.Parse(urlString)
if err != nil {
return ""
}
segments := strings.Split(u.Path, "/")
if len(segments) == 0 {
return ""
}
filename := segments[len(segments)-1]
return filename
}
package util
import (
"io"
"net/http"
"os"
"strconv"
"sync"
)
// MultiSourceDownload expects the urls hosts the same file. However, it does
// not verify the checksums to allow more loose file recognition policies in its
// callers.
//
// Download is concurrent. Other threads will be cancelled when one thread
// complete downloaded winThreshold of the file.
//
// Note that if the urls' speed are close, urls[0] will be selected since its
// goroutine is started first.
//
// Pros:
// - Guaranteed to download the file from the fastest source.
//
// Cons:
// - Wastes bandwidth
func MultiSourceDownload(urls []string, path string) {
const winThreshold = 0.2 // 20% of the file
var wg sync.WaitGroup
var mu sync.Mutex
var win bool
var data *[]byte
var winUrl string
for _, url := range urls {
wg.Add(1)
go func(url string) {
defer wg.Done()
resp, err := http.Get(url)
if err != nil {
return
}
defer resp.Body.Close()
// TODO: totalSize might be -1 when size it not known, handle this case
totalSize := resp.ContentLength
thresholdSize := int64(float64(totalSize) * winThreshold)
buffer := make([]byte, 2048)
var downloadedSize int64
for {
n, err := resp.Body.Read(buffer)
if err != nil && err != io.EOF {
return
}
if n == 0 {
break
}
downloadedSize += int64(n)
if win && winUrl != url {
println(
"canceling:",
url,
"("+strconv.FormatInt(downloadedSize, 10)+"/"+
strconv.FormatInt(totalSize, 10), "bytes)",
)
return
}
if downloadedSize >= thresholdSize {
mu.Lock()
if !win {
println(
"winning:",
url,
"("+strconv.FormatInt(downloadedSize, 10)+"/"+
strconv.FormatInt(totalSize, 10), "bytes)",
)
win = true
data = &buffer
winUrl = url
}
mu.Unlock()
}
}
}(url)
}
wg.Wait()
println("winning url: ", winUrl)
file, _ := os.Create(path)
defer func(file *os.File) {
_ = file.Close()
}(file)
_, err := file.Write(*data)
if err != nil {
panic(err)
}
println("Downloaded to", path)
}