1
0
mirror of synced 2026-03-31 14:04:19 +00:00

Compare commits

...

3 Commits

Author SHA1 Message Date
Stefan Prodan
8ac91c49e4 [RFC] Flux CLI Plugin System
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2026-03-30 12:13:21 +03:00
Stefan Prodan
5fc8afcaaf Merge pull request #5724 from rohansood10/feat/resolve-symlinks-5055
Add --resolve-symlinks flag to build and push artifact commands
2026-03-28 10:46:53 +02:00
Rohan Sood
7bf0bda689 Add --resolve-symlinks flag to build and push artifact commands
This adds a --resolve-symlinks flag to the flux build artifact and flux push artifact
commands. When enabled, symlinks in the source directory are resolved (copied as regular
files/directories) before building the artifact. This includes:

- Recursive symlink resolution with cycle detection
- File permission preservation
- Proper handling of both single-file and directory symlink targets
- Comprehensive test coverage

Fixes #5055

Signed-off-by: Rohan Sood <56945243+rohansood10@users.noreply.github.com>
2026-03-20 11:47:27 -07:00
4 changed files with 595 additions and 14 deletions

View File

@@ -22,6 +22,7 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
@@ -48,9 +49,10 @@ from the given directory or a single manifest file.`,
}
type buildArtifactFlags struct {
output string
path string
ignorePaths []string
output string
path string
ignorePaths []string
resolveSymlinks bool
}
var excludeOCI = append(strings.Split(sourceignore.ExcludeVCS, ","), strings.Split(sourceignore.ExcludeExt, ",")...)
@@ -61,6 +63,7 @@ func init() {
buildArtifactCmd.Flags().StringVarP(&buildArtifactArgs.path, "path", "p", "", "Path to the directory where the Kubernetes manifests are located.")
buildArtifactCmd.Flags().StringVarP(&buildArtifactArgs.output, "output", "o", "artifact.tgz", "Path to where the artifact tgz file should be written.")
buildArtifactCmd.Flags().StringSliceVar(&buildArtifactArgs.ignorePaths, "ignore-paths", excludeOCI, "set paths to ignore in .gitignore format")
buildArtifactCmd.Flags().BoolVar(&buildArtifactArgs.resolveSymlinks, "resolve-symlinks", false, "resolve symlinks by copying their targets into the artifact")
buildCmd.AddCommand(buildArtifactCmd)
}
@@ -85,6 +88,15 @@ func buildArtifactCmdRun(cmd *cobra.Command, args []string) error {
return fmt.Errorf("invalid path '%s', must point to an existing directory or file", path)
}
if buildArtifactArgs.resolveSymlinks {
resolved, cleanupDir, err := resolveSymlinks(path)
if err != nil {
return fmt.Errorf("resolving symlinks failed: %w", err)
}
defer os.RemoveAll(cleanupDir)
path = resolved
}
logger.Actionf("building artifact from %s", path)
ociClient := oci.NewClient(oci.DefaultOptions())
@@ -96,6 +108,141 @@ func buildArtifactCmdRun(cmd *cobra.Command, args []string) error {
return nil
}
// resolveSymlinks creates a temporary directory with symlinks resolved to their
// real file contents. This allows building artifacts from symlink trees (e.g.,
// those created by Nix) where the actual files live outside the source directory.
// It returns the resolved path and the temporary directory path for cleanup.
func resolveSymlinks(srcPath string) (string, string, error) {
absPath, err := filepath.Abs(srcPath)
if err != nil {
return "", "", err
}
info, err := os.Stat(absPath)
if err != nil {
return "", "", err
}
// For a single file, resolve the symlink and return the path to the
// copied file within the temp dir, preserving file semantics for callers.
if !info.IsDir() {
resolved, err := filepath.EvalSymlinks(absPath)
if err != nil {
return "", "", fmt.Errorf("resolving symlink for %s: %w", absPath, err)
}
tmpDir, err := os.MkdirTemp("", "flux-artifact-*")
if err != nil {
return "", "", err
}
dst := filepath.Join(tmpDir, filepath.Base(absPath))
if err := copyFile(resolved, dst); err != nil {
os.RemoveAll(tmpDir)
return "", "", err
}
return dst, tmpDir, nil
}
tmpDir, err := os.MkdirTemp("", "flux-artifact-*")
if err != nil {
return "", "", err
}
visited := make(map[string]bool)
if err := copyDir(absPath, tmpDir, visited); err != nil {
os.RemoveAll(tmpDir)
return "", "", err
}
return tmpDir, tmpDir, nil
}
// copyDir recursively copies the contents of srcDir to dstDir, resolving any
// symlinks encountered along the way. The visited map tracks resolved real
// directory paths to detect and break symlink cycles.
func copyDir(srcDir, dstDir string, visited map[string]bool) error {
real, err := filepath.EvalSymlinks(srcDir)
if err != nil {
return fmt.Errorf("resolving symlink %s: %w", srcDir, err)
}
abs, err := filepath.Abs(real)
if err != nil {
return fmt.Errorf("getting absolute path for %s: %w", real, err)
}
if visited[abs] {
return nil // break the cycle
}
visited[abs] = true
entries, err := os.ReadDir(srcDir)
if err != nil {
return err
}
for _, entry := range entries {
srcPath := filepath.Join(srcDir, entry.Name())
dstPath := filepath.Join(dstDir, entry.Name())
// Resolve symlinks to get the real path and info.
realPath, err := filepath.EvalSymlinks(srcPath)
if err != nil {
return fmt.Errorf("resolving symlink %s: %w", srcPath, err)
}
realInfo, err := os.Stat(realPath)
if err != nil {
return fmt.Errorf("stat resolved path %s: %w", realPath, err)
}
if realInfo.IsDir() {
if err := os.MkdirAll(dstPath, realInfo.Mode()); err != nil {
return err
}
// Recursively copy the resolved directory contents.
if err := copyDir(realPath, dstPath, visited); err != nil {
return err
}
continue
}
if !realInfo.Mode().IsRegular() {
continue
}
if err := copyFile(realPath, dstPath); err != nil {
return err
}
}
return nil
}
func copyFile(src, dst string) error {
srcInfo, err := os.Stat(src)
if err != nil {
return err
}
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
return err
}
out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, srcInfo.Mode())
if err != nil {
return err
}
defer out.Close()
if _, err := io.Copy(out, in); err != nil {
return err
}
return out.Close()
}
func saveReaderToFile(reader io.Reader) (string, error) {
b, err := io.ReadAll(bufio.NewReader(reader))
if err != nil {

View File

@@ -18,6 +18,7 @@ package main
import (
"os"
"path/filepath"
"strings"
"testing"
@@ -68,3 +69,113 @@ data:
}
}
// Test_resolveSymlinks covers a directory containing a regular file, a
// symlinked file and a symlinked subdirectory, and verifies that the
// resolved copy contains only regular files and directories.
func Test_resolveSymlinks(t *testing.T) {
	g := NewWithT(t)

	// The directory holding the real target file.
	targetDir := t.TempDir()
	target := filepath.Join(targetDir, "real.yaml")
	g.Expect(os.WriteFile(target, []byte("apiVersion: v1\nkind: Namespace\nmetadata:\n name: test\n"), 0o644)).To(Succeed())

	// The directory to resolve: a symlink to the target, a regular file and
	// a symlink to a subdirectory that lives outside it.
	buildDir := t.TempDir()
	g.Expect(os.Symlink(target, filepath.Join(buildDir, "linked.yaml"))).To(Succeed())
	g.Expect(os.WriteFile(filepath.Join(buildDir, "regular.yaml"), []byte("apiVersion: v1\nkind: ConfigMap\n"), 0o644)).To(Succeed())

	nestedDir := filepath.Join(targetDir, "subdir")
	g.Expect(os.MkdirAll(nestedDir, 0o755)).To(Succeed())
	g.Expect(os.WriteFile(filepath.Join(nestedDir, "nested.yaml"), []byte("nested"), 0o644)).To(Succeed())
	g.Expect(os.Symlink(nestedDir, filepath.Join(buildDir, "linkeddir"))).To(Succeed())

	resolved, cleanupDir, err := resolveSymlinks(buildDir)
	g.Expect(err).NotTo(HaveOccurred())
	t.Cleanup(func() { os.RemoveAll(cleanupDir) })

	// The regular file is carried over verbatim.
	data, err := os.ReadFile(filepath.Join(resolved, "regular.yaml"))
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(string(data)).To(Equal("apiVersion: v1\nkind: ConfigMap\n"))

	// The symlinked file is materialized as a regular file with the
	// target's contents.
	data, err = os.ReadFile(filepath.Join(resolved, "linked.yaml"))
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(string(data)).To(ContainSubstring("kind: Namespace"))
	fi, err := os.Lstat(filepath.Join(resolved, "linked.yaml"))
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(fi.Mode().IsRegular()).To(BeTrue())

	// The symlinked directory is materialized together with its contents,
	// which must themselves be regular files.
	data, err = os.ReadFile(filepath.Join(resolved, "linkeddir", "nested.yaml"))
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(string(data)).To(Equal("nested"))
	fi, err = os.Lstat(filepath.Join(resolved, "linkeddir", "nested.yaml"))
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(fi.Mode().IsRegular()).To(BeTrue())
}
// Test_resolveSymlinks_singleFile verifies that resolving a path that is a
// single symlinked file returns a file path (not a directory) whose contents
// match the link target.
func Test_resolveSymlinks_singleFile(t *testing.T) {
	g := NewWithT(t)

	// A real manifest and a symlink pointing at it from another directory.
	targetDir := t.TempDir()
	target := filepath.Join(targetDir, "manifest.yaml")
	g.Expect(os.WriteFile(target, []byte("kind: ConfigMap"), 0o644)).To(Succeed())

	linkDir := t.TempDir()
	link := filepath.Join(linkDir, "link.yaml")
	g.Expect(os.Symlink(target, link)).To(Succeed())

	resolved, cleanupDir, err := resolveSymlinks(link)
	g.Expect(err).NotTo(HaveOccurred())
	t.Cleanup(func() { os.RemoveAll(cleanupDir) })

	// The returned path must be a file, not a directory.
	fi, err := os.Stat(resolved)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(fi.IsDir()).To(BeFalse())

	// And its contents must match the link target.
	data, err := os.ReadFile(resolved)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(string(data)).To(Equal("kind: ConfigMap"))
}
// Test_resolveSymlinks_cycle verifies that a self-referencing symlink
// (dir/cycle -> dir) does not cause infinite recursion or infinitely nested
// output directories.
func Test_resolveSymlinks_cycle(t *testing.T) {
	g := NewWithT(t)

	// dir/cycle -> dir forms a symlink cycle.
	dir := t.TempDir()
	g.Expect(os.WriteFile(filepath.Join(dir, "file.yaml"), []byte("data"), 0o644)).To(Succeed())
	g.Expect(os.Symlink(dir, filepath.Join(dir, "cycle"))).To(Succeed())

	// Resolution must terminate instead of looping forever.
	resolved, cleanupDir, err := resolveSymlinks(dir)
	g.Expect(err).NotTo(HaveOccurred())
	t.Cleanup(func() { os.RemoveAll(cleanupDir) })

	// The regular file is still copied.
	data, err := os.ReadFile(filepath.Join(resolved, "file.yaml"))
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(string(data)).To(Equal("data"))

	// The cycle entry exists but must not nest into cycle/cycle/cycle/...
	_, err = os.Stat(filepath.Join(resolved, "cycle"))
	g.Expect(err).NotTo(HaveOccurred())
	_, err = os.Stat(filepath.Join(resolved, "cycle", "cycle", "cycle"))
	g.Expect(os.IsNotExist(err)).To(BeTrue())
}

View File

@@ -103,17 +103,18 @@ The command can read the credentials from '~/.docker/config.json' but they can a
}
type pushArtifactFlags struct {
path string
source string
revision string
creds string
provider flags.SourceOCIProvider
ignorePaths []string
annotations []string
output string
debug bool
reproducible bool
insecure bool
path string
source string
revision string
creds string
provider flags.SourceOCIProvider
ignorePaths []string
annotations []string
output string
debug bool
reproducible bool
insecure bool
resolveSymlinks bool
}
var pushArtifactArgs = newPushArtifactFlags()
@@ -137,6 +138,7 @@ func init() {
pushArtifactCmd.Flags().BoolVarP(&pushArtifactArgs.debug, "debug", "", false, "display logs from underlying library")
pushArtifactCmd.Flags().BoolVar(&pushArtifactArgs.reproducible, "reproducible", false, "ensure reproducible image digests by setting the created timestamp to '1970-01-01T00:00:00Z'")
pushArtifactCmd.Flags().BoolVar(&pushArtifactArgs.insecure, "insecure-registry", false, "allows artifacts to be pushed without TLS")
pushArtifactCmd.Flags().BoolVar(&pushArtifactArgs.resolveSymlinks, "resolve-symlinks", false, "resolve symlinks by copying their targets into the artifact")
pushCmd.AddCommand(pushArtifactCmd)
}
@@ -183,6 +185,15 @@ func pushArtifactCmdRun(cmd *cobra.Command, args []string) error {
return fmt.Errorf("invalid path '%s', must point to an existing directory or file: %w", path, err)
}
if pushArtifactArgs.resolveSymlinks {
resolved, cleanupDir, err := resolveSymlinks(path)
if err != nil {
return fmt.Errorf("resolving symlinks failed: %w", err)
}
defer os.RemoveAll(cleanupDir)
path = resolved
}
annotations := map[string]string{}
for _, annotation := range pushArtifactArgs.annotations {
kv := strings.Split(annotation, "=")

View File

@@ -0,0 +1,312 @@
# RFC-XXXX Flux CLI Plugin System
**Status:** provisional
**Creation date:** 2026-03-30
**Last update:** 2026-03-30
## Summary
This RFC proposes a plugin system for the Flux CLI that allows external CLI tools to be
discoverable and invocable as `flux <name>` subcommands. Plugins are installed from a
centralized catalog hosted on GitHub, with SHA-256 checksum verification and automatic
version updates. The design follows the established kubectl plugin pattern used across
the Kubernetes ecosystem.
## Motivation
The Flux CLI currently has no mechanism for extending its functionality with external tools.
Projects like [flux-operator](https://github.com/controlplaneio-fluxcd/flux-operator) and
[flux-local](https://github.com/allenporter/flux-local) provide complementary CLI tools
that users install and invoke separately. This creates a fragmented user experience where
Flux-related workflows require switching between multiple binaries with different flag
conventions and discovery mechanisms.
The Kubernetes ecosystem has a proven model for CLI extensibility: kubectl plugins are
executables prefixed with `kubectl-` that can be discovered, installed via
[krew](https://krew.sigs.k8s.io/), and invoked as `kubectl <name>`. This model has
been widely adopted and is well understood by Kubernetes users.
### Goals
- Allow external CLI tools to be invoked as `flux <name>` subcommands without modifying
the external binary.
- Provide a `flux plugin install` command to download plugins from a centralized catalog
with checksum verification.
- Support shell completion for plugin subcommands by delegating to the plugin's own
Cobra `__complete` command.
- Support plugins written as scripts (Python, Bash, etc.) via symlinks into the
plugin directory.
- Ensure built-in commands always take priority over plugins.
- Keep the plugin system lightweight with zero impact on non-plugin Flux commands.
### Non-Goals
- Plugin dependency management (plugins are standalone binaries).
- Cosign/SLSA signature verification (SHA-256 only in v1beta1; signatures can be added later).
- Automatic update checks on startup (users run `flux plugin update` explicitly).
- Private catalog authentication (users can use `$FLUXCD_PLUGIN_CATALOG` with TLS).
- Flag sharing between Flux and plugins (`--namespace`, `--context`, etc. are not
forwarded; plugins manage their own flags).
## Proposal
### Plugin Discovery
Plugins are executables prefixed with `flux-` that are placed in a single plugin directory.
The `flux-<name>` binary maps to the `flux <name>` command. For example,
`flux-operator` becomes `flux operator`.
The default plugin directory is `~/.fluxcd/plugins/`. Users can override it with the
`$FLUXCD_PLUGINS` environment variable. Only this single directory is scanned.
When a plugin is discovered, it appears under a "Plugin Commands:" group in `flux --help`:
```
Plugin Commands:
operator Runs the operator plugin
Additional Commands:
bootstrap Deploy Flux on a cluster the GitOps way.
...
```
### Plugin Execution
On macOS and Linux, `flux operator export report` replaces the current process with
`flux-operator export report` via `syscall.Exec`, matching kubectl's behavior.
On Windows, the plugin runs as a child process with full I/O passthrough.
All arguments after the plugin name are passed through verbatim with
`DisableFlagParsing: true`.
### Shell Completion
Shell completion is delegated to the plugin binary via Cobra's `__complete` protocol.
When the user types `flux operator get <TAB>`, Flux runs
`flux-operator __complete get ""` and returns the results. This works automatically
for all Cobra-based plugins (like flux-operator). Non-Cobra plugins gracefully degrade
to no completions.
### Plugin Catalog
A dedicated GitHub repository ([fluxcd/plugins](https://github.com/fluxcd/plugins))
serves as the plugin catalog. Each plugin has a YAML manifest:
```yaml
apiVersion: cli.fluxcd.io/v1beta1
kind: Plugin
name: operator
description: Flux Operator CLI
homepage: https://fluxoperator.dev/
source: https://github.com/controlplaneio-fluxcd/flux-operator
bin: flux-operator
versions:
- version: 0.45.0
platforms:
- os: darwin
arch: arm64
url: https://github.com/.../flux-operator_0.45.0_darwin_arm64.tar.gz
checksum: sha256:cd85d5d84d264...
- os: linux
arch: amd64
url: https://github.com/.../flux-operator_0.45.0_linux_amd64.tar.gz
checksum: sha256:96198da969096...
```
A generated `catalog.yaml` (`PluginCatalog` kind) contains static metadata for all
plugins, enabling `flux plugin search` with a single HTTP fetch.
### CLI Commands
| Command | Description |
|---------|-------------|
| `flux plugin list` (alias: `ls`) | List installed plugins with versions and paths |
| `flux plugin install <name>[@<version>]` | Install a plugin from the catalog |
| `flux plugin uninstall <name>` | Remove a plugin binary and receipt |
| `flux plugin update [name]` | Update one or all installed plugins |
| `flux plugin search [query]` | Search the plugin catalog |
### Install Flow
1. Fetch `plugins/<name>.yaml` from the catalog URL
2. Validate `apiVersion: cli.fluxcd.io/v1beta1` and `kind: Plugin`
3. Resolve version (latest if unspecified, or match `@version`)
4. Find platform entry matching `runtime.GOOS` / `runtime.GOARCH`
5. Download archive to temp file with SHA-256 checksum verification
6. Extract only the declared binary from the archive (tar.gz or zip), streaming
directly to disk without buffering in memory
7. Write binary to plugin directory as `flux-<name>` (mode `0755`)
8. Write install receipt (`flux-<name>.yaml`) recording version, platform, download URL, checksum and timestamp
Install is idempotent -- reinstalling overwrites the binary and receipt.
### Install Receipts
When a plugin is installed via `flux plugin install`, a receipt file is written
next to the binary:
```yaml
name: operator
version: "0.45.0"
installedAt: "2026-03-30T10:00:00Z"
platform:
os: darwin
arch: arm64
url: https://github.com/.../flux-operator_0.45.0_darwin_arm64.tar.gz
checksum: sha256:cd85d5d84d264...
```
Receipts enable `flux plugin list` to show versions, `flux plugin update` to compare
installed vs. latest, and provenance tracking. Manually installed plugins (no receipt)
show `manual` in listings and are skipped by `flux plugin update`.
### User Stories
#### Flux User Installs a Plugin
As a Flux user, I want to install the Flux Operator CLI as a plugin so that I can
manage Flux instances using `flux operator` instead of a separate `flux-operator` binary.
```bash
flux plugin install operator
flux operator get instance -n flux-system
```
#### Flux User Updates Plugins
As a Flux user, I want to update all my installed plugins to the latest versions
with a single command.
```bash
flux plugin update
```
#### Flux User Symlinks a Python Plugin
As a Flux user, I want to use [flux-local](https://github.com/allenporter/flux-local)
(a Python tool) as a Flux CLI plugin by symlinking it into the plugin directory.
Since flux-local is not a Go binary distributed via the catalog, I install it with
pip and register it manually.
```bash
uv venv
source .venv/bin/activate
uv pip install flux-local
ln -s "$(pwd)/.venv/bin/flux-local" ~/.fluxcd/plugins/flux-local
flux local test
```
Manually symlinked plugins show `manual` in `flux plugin list` and are skipped by
`flux plugin update`.
#### Flux User Discovers Available Plugins
As a Flux user, I want to search for available plugins so that I can extend my
Flux CLI with community tools.
```bash
flux plugin search
```
#### Plugin Author Publishes a Plugin
As a plugin author, I want to submit my tool to the Flux plugin catalog so that
Flux users can install it with `flux plugin install <name>`.
1. Release binary with GoReleaser (produces tarballs/zips + checksums)
2. Submit a PR to `fluxcd/plugins` with `plugins/<name>.yaml`
3. Subsequent releases are picked up by automated polling workflows
### Alternatives
#### PATH-based Discovery (kubectl model)
kubectl discovers plugins by scanning `$PATH` for `kubectl-*` executables. This is
simple but has drawbacks:
- Scanning the entire PATH is slow on some systems
- No control over what's discoverable (any `flux-*` binary on PATH becomes a plugin)
- No install/update mechanism built in (requires a separate tool like krew)
The single-directory approach is faster, more predictable, and integrates install/update
directly into the CLI.
## Design Details
### Package Structure
```
internal/plugin/
discovery.go # Plugin dir scanning, DI-based Handler
completion.go # Shell completion via Cobra __complete protocol
exec_unix.go # syscall.Exec (//go:build !windows)
exec_windows.go # os/exec fallback (//go:build windows)
catalog.go # Catalog fetching, manifest parsing, version/platform resolution
install.go # Download, verify, extract, receipts
update.go # Compare receipts vs catalog, update check
cmd/flux/
plugin.go # Cobra command registration, all plugin subcommands
```
The `internal/plugin` package uses dependency injection (injectable `ReadDir`, `Stat`,
`GetEnv`, `HomeDir` on a `Handler` struct) for testability. Tests mock these functions
directly without filesystem fixtures.
### Plugin Directory
- **Default**: `~/.fluxcd/plugins/` -- auto-created by install/update commands
(best-effort, no error if filesystem is read-only).
- **Override**: `$FLUXCD_PLUGINS` env var replaces the default directory path.
When set, the CLI does not auto-create the directory.
### Startup Behavior
`registerPlugins()` is called in `main()` before `rootCmd.Execute()`. It scans the
plugin directory and registers discovered plugins as Cobra subcommands. The scan is
lightweight (a single `ReadDir` call) and only occurs if the plugin directory exists.
Built-in commands always take priority.
### Manifest Validation
Both plugin manifests and the catalog are validated after fetching:
- `apiVersion` must be `cli.fluxcd.io/v1beta1`
- `kind` must be `Plugin` or `PluginCatalog` respectively
- Checksum format is `<algorithm>:<hex>` (currently `sha256:...`), allowing future
algorithm migration without schema changes
### Security Considerations
- **Checksum verification**: All downloaded archives are verified against SHA-256
checksums declared in the catalog manifest before extraction.
- **Path traversal protection**: Archive extraction guards against tar traversal.
- **Response size limits**: HTTP responses from the catalog are capped at 10 MiB to
prevent unbounded memory allocation from malicious servers.
- **No code execution during discovery**: Plugin directory scanning only reads directory
entries and file metadata. No plugin binary is executed during startup.
- **Retryable fetching**: All HTTP/S operations use automatic retries for transient network failures.
### Catalog Repository CI
The `fluxcd/plugins` repository includes CI workflows that:
1. Validate plugin manifests on every PR (schema, name consistency, URL reachability,
checksum verification, binary presence in archives, no builtin collisions)
2. Regenerate `catalog.yaml` when plugins are added or removed
3. Automatically poll upstream repositories for new releases and create update PRs
### Known Limitations (v1beta1)
1. **No cosign/SLSA verification** -- SHA-256 only. Signature verification can be added later.
2. **No plugin dependencies** -- plugins are standalone binaries.
3. **No automatic update checks** -- users run `flux plugin update` explicitly.
4. **No private catalog auth** -- `$FLUXCD_PLUGIN_CATALOG` works for private URLs but no token injection.
5. **No version constraints** -- no `>=0.44.0` ranges. Exact version or latest only.
6. **Flag names differ between Flux and plugins** -- e.g., `--context` (flux) vs
`--kube-context` (flux-operator). This is a plugin concern, not a system concern.
## Implementation History
- **2026-03-30** PoC plugin catalog repository with example manifests and CI validation workflows available at [fluxcd/plugins](https://github.com/fluxcd/plugins).