From 768c28b50b938c9e67e0b922658c580d97a8933f Mon Sep 17 00:00:00 2001 From: Pavel Boldyrev Date: Sat, 21 Mar 2026 12:05:45 -0400 Subject: [PATCH 1/3] feat(provider): add `proxmox_backup_job` resource and `proxmox_backup_jobs` data source Signed-off-by: Pavel Boldyrev --- docs/data-sources/backup_jobs.md | 47 ++ docs/resources/backup_job.md | 91 ++++ .../proxmox_backup_jobs/data-source.tf | 1 + .../resources/proxmox_backup_job/import.sh | 1 + .../resources/proxmox_backup_job/resource.tf | 8 + fwprovider/attribute/attribute.go | 31 ++ fwprovider/cluster/backup/datasource.go | 178 ++++++++ fwprovider/cluster/backup/model.go | 427 ++++++++++++++++++ fwprovider/cluster/backup/resource.go | 409 +++++++++++++++++ fwprovider/cluster/backup/resource_test.go | 221 +++++++++ fwprovider/provider.go | 3 + main.go | 2 + proxmox/cluster/backup/backup.go | 83 ++++ proxmox/cluster/backup/backup_types.go | 239 ++++++++++ proxmox/cluster/backup/client.go | 21 + proxmox/cluster/client.go | 6 + 16 files changed, 1768 insertions(+) create mode 100644 docs/data-sources/backup_jobs.md create mode 100644 docs/resources/backup_job.md create mode 100644 examples/data-sources/proxmox_backup_jobs/data-source.tf create mode 100644 examples/resources/proxmox_backup_job/import.sh create mode 100644 examples/resources/proxmox_backup_job/resource.tf create mode 100644 fwprovider/cluster/backup/datasource.go create mode 100644 fwprovider/cluster/backup/model.go create mode 100644 fwprovider/cluster/backup/resource.go create mode 100644 fwprovider/cluster/backup/resource_test.go create mode 100644 proxmox/cluster/backup/backup.go create mode 100644 proxmox/cluster/backup/backup_types.go create mode 100644 proxmox/cluster/backup/client.go diff --git a/docs/data-sources/backup_jobs.md b/docs/data-sources/backup_jobs.md new file mode 100644 index 000000000..633d18b3a --- /dev/null +++ b/docs/data-sources/backup_jobs.md @@ -0,0 +1,47 @@ +--- +layout: page +title: proxmox_backup_jobs +parent: Data 
Sources +subcategory: Virtual Environment +description: |- + Retrieves the list of cluster-wide backup jobs. +--- + +# Data Source: proxmox_backup_jobs + +Retrieves the list of cluster-wide backup jobs. + +## Example Usage + +```terraform +data "proxmox_backup_jobs" "all" {} +``` + + +## Schema + +### Read-Only + +- `id` (String) Unique identifier for this data source. +- `jobs` (Attributes List) List of backup jobs. (see [below for nested schema](#nestedatt--jobs)) + + +### Nested Schema for `jobs` + +Read-Only: + +- `all` (Boolean) Indicates whether all VMs and CTs are backed up. +- `compress` (String) Compression algorithm used for the backup. +- `enabled` (Boolean) Indicates whether the backup job is enabled. +- `id` (String) Unique identifier of the backup job. +- `mailnotification` (String) When to send email notifications (always or failure). +- `mailto` (String) Comma-separated list of email addresses for notifications. +- `mode` (String) Backup mode (e.g. snapshot, suspend, stop). +- `node` (String) Node on which the backup job runs. +- `notes_template` (String) Template for backup notes. +- `pool` (String) Pool whose members are backed up. +- `protected` (Boolean) Indicates whether backups created by this job are protected from pruning. +- `prune_backups` (String) Prune options in the format `keep-last=N,...`. +- `schedule` (String) Backup schedule in systemd calendar format. +- `storage` (String) Target storage for the backup. +- `vmid` (List of String) List of VM/CT IDs included in the backup job. diff --git a/docs/resources/backup_job.md b/docs/resources/backup_job.md new file mode 100644 index 000000000..f455bfab4 --- /dev/null +++ b/docs/resources/backup_job.md @@ -0,0 +1,91 @@ +--- +layout: page +title: proxmox_backup_job +parent: Resources +subcategory: Virtual Environment +description: |- + Manages a Proxmox VE cluster backup job. +--- + +# Resource: proxmox_backup_job + +Manages a Proxmox VE cluster backup job. 
+ +## Example Usage + +```terraform +resource "proxmox_backup_job" "daily_backup" { + id = "daily-backup" + schedule = "*-*-* 02:00" + storage = "local" + all = true + mode = "snapshot" + compress = "zstd" +} +``` + + +## Schema + +### Required + +- `id` (String) The identifier of the backup job. +- `schedule` (String) Backup schedule in cron format or systemd calendar event. +- `storage` (String) The storage identifier for the backup. + +### Optional + +- `all` (Boolean) Whether to back up all known guests on the node. +- `bwlimit` (Number) I/O bandwidth limit in KiB/s. +- `compress` (String) The compression algorithm (0, gzip, lzo, or zstd). +- `enabled` (Boolean) Whether the backup job is enabled. +- `exclude_path` (List of String) A list of paths to exclude from the backup. +- `fleecing` (Attributes) Fleecing configuration for the backup job. (see [below for nested schema](#nestedatt--fleecing)) +- `ionice` (Number) I/O priority (0-8). +- `lockwait` (Number) Maximum wait time in minutes for the global lock. +- `mailnotification` (String) Email notification setting (always or failure). +- `mailto` (String) A comma-separated list of email addresses to send notifications to. +- `maxfiles` (Number) Deprecated: use prune_backups instead. Maximum number of backup files per guest. +- `mode` (String) The backup mode (snapshot, suspend, or stop). +- `node` (String) The cluster node name to limit the backup job to. +- `notes_template` (String) Template for notes attached to the backup. +- `pbs_change_detection_mode` (String) PBS change detection mode (legacy, data, or metadata). +- `performance` (Attributes) Performance-related settings for the backup job. (see [below for nested schema](#nestedatt--performance)) +- `pigz` (Number) Number of pigz threads (0 disables, 1 uses single-threaded gzip). +- `pool` (String) Limit backup to guests in the specified pool. +- `protected` (Boolean) Whether the backup should be marked as protected. 
+- `prune_backups` (String) Retention options as a comma-separated list of key=value pairs (e.g. keep-last=3,keep-weekly=2). +- `remove` (Boolean) Whether to remove old backups if there are more than maxfiles. +- `repeat_missed` (Boolean) Whether to repeat missed backup jobs as soon as possible. +- `script` (String) Path to a script to execute before/after the backup job. +- `starttime` (String) The scheduled start time (HH:MM). +- `stdexcludes` (Boolean) Whether to exclude common temporary files from the backup. +- `stopwait` (Number) Maximum wait time in minutes for a guest to stop. +- `tmpdir` (String) Path to the temporary directory for the backup job. +- `vmid` (List of String) A list of guest VM/CT IDs to include in the backup job. +- `zstd` (Number) Number of zstd threads (0 uses half of available cores). + + +### Nested Schema for `fleecing` + +Optional: + +- `enabled` (Boolean) Whether fleecing is enabled. +- `storage` (String) The storage identifier for fleecing. + + + +### Nested Schema for `performance` + +Optional: + +- `max_workers` (Number) Maximum number of workers for parallel backup. +- `pbs_entries_max` (Number) Maximum number of entries for PBS catalog. 
+ +## Import + +Import is supported using the following syntax: + +```shell +terraform import proxmox_backup_job.daily_backup daily-backup +``` diff --git a/examples/data-sources/proxmox_backup_jobs/data-source.tf b/examples/data-sources/proxmox_backup_jobs/data-source.tf new file mode 100644 index 000000000..06b07f95e --- /dev/null +++ b/examples/data-sources/proxmox_backup_jobs/data-source.tf @@ -0,0 +1 @@ +data "proxmox_backup_jobs" "all" {} diff --git a/examples/resources/proxmox_backup_job/import.sh b/examples/resources/proxmox_backup_job/import.sh new file mode 100644 index 000000000..253866352 --- /dev/null +++ b/examples/resources/proxmox_backup_job/import.sh @@ -0,0 +1 @@ +terraform import proxmox_backup_job.daily_backup daily-backup diff --git a/examples/resources/proxmox_backup_job/resource.tf b/examples/resources/proxmox_backup_job/resource.tf new file mode 100644 index 000000000..0fc0bcd63 --- /dev/null +++ b/examples/resources/proxmox_backup_job/resource.tf @@ -0,0 +1,8 @@ +resource "proxmox_backup_job" "daily_backup" { + id = "daily-backup" + schedule = "*-*-* 02:00" + storage = "local" + all = true + mode = "snapshot" + compress = "zstd" +} diff --git a/fwprovider/attribute/attribute.go b/fwprovider/attribute/attribute.go index 7aadcf60c..1b07c9169 100644 --- a/fwprovider/attribute/attribute.go +++ b/fwprovider/attribute/attribute.go @@ -11,8 +11,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/bpg/terraform-provider-proxmox/fwprovider/types/stringset" + proxmoxtypes "github.com/bpg/terraform-provider-proxmox/proxmox/types" ) // ResourceID generates an attribute definition suitable for the 
always-present resource `id` attribute. @@ -42,6 +44,35 @@ func IsDefined(v attr.Value) bool { return !v.IsNull() && !v.IsUnknown() } +// StringPtrFromValue returns a *string from a types.String, returning nil for null or unknown values. +// Use this instead of ValueStringPointer() when the field is Optional+Computed without a Default, +// because ValueStringPointer() returns &"" for unknown values which sends empty strings to the API. +func StringPtrFromValue(v types.String) *string { + if v.IsNull() || v.IsUnknown() { + return nil + } + + return v.ValueStringPointer() +} + +// CustomBoolPtrFromValue returns a *CustomBool from a types.Bool, returning nil for null or unknown values. +func CustomBoolPtrFromValue(v types.Bool) *proxmoxtypes.CustomBool { + if v.IsNull() || v.IsUnknown() { + return nil + } + + return proxmoxtypes.CustomBoolPtr(v.ValueBoolPointer()) +} + +// Int64PtrFromValue returns a *int64 from a types.Int64, returning nil for null or unknown values. +func Int64PtrFromValue(v types.Int64) *int64 { + if v.IsNull() || v.IsUnknown() { + return nil + } + + return v.ValueInt64Pointer() +} + // CheckDelete adds an API field name to the delete list if the plan field is null but the state field is not null. // This is used to handle attribute deletion in API calls. func CheckDelete(planField, stateField attr.Value, toDelete *[]string, apiName string) { diff --git a/fwprovider/cluster/backup/datasource.go b/fwprovider/cluster/backup/datasource.go new file mode 100644 index 000000000..74b305b19 --- /dev/null +++ b/fwprovider/cluster/backup/datasource.go @@ -0,0 +1,178 @@ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ */ + +package backup + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/bpg/terraform-provider-proxmox/fwprovider/config" + "github.com/bpg/terraform-provider-proxmox/proxmox/cluster/backup" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &backupJobsDataSource{} + _ datasource.DataSourceWithConfigure = &backupJobsDataSource{} +) + +type backupJobsDataSource struct { + client *backup.Client +} + +// backupJobsDataSourceModel is the top-level model for the backup jobs data source. +type backupJobsDataSourceModel struct { + ID types.String `tfsdk:"id"` + Jobs []backupJobDatasourceModel `tfsdk:"jobs"` +} + +// NewDataSource creates a new backup jobs data source. +func NewDataSource() datasource.DataSource { + return &backupJobsDataSource{} +} + +func (d *backupJobsDataSource) Metadata( + _ context.Context, + _ datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = "proxmox_backup_jobs" +} + +func (d *backupJobsDataSource) Configure( + _ context.Context, + req datasource.ConfigureRequest, + resp *datasource.ConfigureResponse, +) { + if req.ProviderData == nil { + return + } + + cfg, ok := req.ProviderData.(config.DataSource) + if !ok { + resp.Diagnostics.AddError( + "Unexpected DataSource Configure Type", + fmt.Sprintf("Expected config.DataSource, got: %T", req.ProviderData), + ) + + return + } + + d.client = cfg.Client.Cluster().Backup() +} + +func (d *backupJobsDataSource) Schema( + _ context.Context, + _ datasource.SchemaRequest, + resp *datasource.SchemaResponse, +) { + resp.Schema = schema.Schema{ + Description: "Retrieves the list of cluster-wide backup jobs.", + Attributes: map[string]schema.Attribute{ + "id": 
schema.StringAttribute{ + Description: "Unique identifier for this data source.", + Computed: true, + }, + "jobs": schema.ListNestedAttribute{ + Description: "List of backup jobs.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Unique identifier of the backup job.", + Computed: true, + }, + "schedule": schema.StringAttribute{ + Description: "Backup schedule in systemd calendar format.", + Computed: true, + }, + "storage": schema.StringAttribute{ + Description: "Target storage for the backup.", + Computed: true, + }, + "enabled": schema.BoolAttribute{ + Description: "Indicates whether the backup job is enabled.", + Computed: true, + }, + "node": schema.StringAttribute{ + Description: "Node on which the backup job runs.", + Computed: true, + }, + "vmid": schema.ListAttribute{ + Description: "List of VM/CT IDs included in the backup job.", + Computed: true, + ElementType: types.StringType, + }, + "all": schema.BoolAttribute{ + Description: "Indicates whether all VMs and CTs are backed up.", + Computed: true, + }, + "mode": schema.StringAttribute{ + Description: "Backup mode (e.g. 
snapshot, suspend, stop).", + Computed: true, + }, + "compress": schema.StringAttribute{ + Description: "Compression algorithm used for the backup.", + Computed: true, + }, + "mailto": schema.StringAttribute{ + Description: "Comma-separated list of email addresses for notifications.", + Computed: true, + }, + "mailnotification": schema.StringAttribute{ + Description: "When to send email notifications (always or failure).", + Computed: true, + }, + "notes_template": schema.StringAttribute{ + Description: "Template for backup notes.", + Computed: true, + }, + "pool": schema.StringAttribute{ + Description: "Pool whose members are backed up.", + Computed: true, + }, + "prune_backups": schema.StringAttribute{ + Description: "Prune options in the format `keep-last=N,...`.", + Computed: true, + }, + "protected": schema.BoolAttribute{ + Description: "Indicates whether backups created by this job are protected from pruning.", + Computed: true, + }, + }, + }, + }, + }, + } +} + +func (d *backupJobsDataSource) Read( + ctx context.Context, + _ datasource.ReadRequest, + resp *datasource.ReadResponse, +) { + jobs, err := d.client.List(ctx) + if err != nil { + resp.Diagnostics.AddError("Unable to Read Backup Jobs", err.Error()) + return + } + + var state backupJobsDataSourceModel + + state.ID = types.StringValue("backup_jobs") + state.Jobs = make([]backupJobDatasourceModel, len(jobs)) + + for i, job := range jobs { + state.Jobs[i].fromAPI(job) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} diff --git a/fwprovider/cluster/backup/model.go b/fwprovider/cluster/backup/model.go new file mode 100644 index 000000000..ebb3032af --- /dev/null +++ b/fwprovider/cluster/backup/model.go @@ -0,0 +1,427 @@ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ */ + +package backup + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + + "github.com/bpg/terraform-provider-proxmox/fwprovider/attribute" + "github.com/bpg/terraform-provider-proxmox/proxmox/cluster/backup" + proxmoxtypes "github.com/bpg/terraform-provider-proxmox/proxmox/types" +) + +type backupJobModel struct { + ID types.String `tfsdk:"id"` + Schedule types.String `tfsdk:"schedule"` + Storage types.String `tfsdk:"storage"` + Enabled types.Bool `tfsdk:"enabled"` + Node types.String `tfsdk:"node"` + VMIDs types.List `tfsdk:"vmid"` + All types.Bool `tfsdk:"all"` + Mode types.String `tfsdk:"mode"` + Compress types.String `tfsdk:"compress"` + StartTime types.String `tfsdk:"starttime"` + MaxFiles types.Int64 `tfsdk:"maxfiles"` + MailTo types.String `tfsdk:"mailto"` + MailNotification types.String `tfsdk:"mailnotification"` + BwLimit types.Int64 `tfsdk:"bwlimit"` + IONice types.Int64 `tfsdk:"ionice"` + Pigz types.Int64 `tfsdk:"pigz"` + Zstd types.Int64 `tfsdk:"zstd"` + PruneBackups types.String `tfsdk:"prune_backups"` + Remove types.Bool `tfsdk:"remove"` + NotesTemplate types.String `tfsdk:"notes_template"` + Protected types.Bool `tfsdk:"protected"` + RepeatMissed types.Bool `tfsdk:"repeat_missed"` + Script types.String `tfsdk:"script"` + StdExcludes types.Bool `tfsdk:"stdexcludes"` + ExcludePath types.List `tfsdk:"exclude_path"` + Pool types.String `tfsdk:"pool"` + Fleecing types.Object `tfsdk:"fleecing"` + Performance types.Object `tfsdk:"performance"` + PBSChangeDetectionMode types.String `tfsdk:"pbs_change_detection_mode"` + LockWait types.Int64 `tfsdk:"lockwait"` + StopWait types.Int64 `tfsdk:"stopwait"` + TmpDir types.String 
`tfsdk:"tmpdir"` +} + +type fleecingModel struct { + Enabled types.Bool `tfsdk:"enabled"` + Storage types.String `tfsdk:"storage"` +} + +type performanceModel struct { + MaxWorkers types.Int64 `tfsdk:"max_workers"` + PBSEntriesMax types.Int64 `tfsdk:"pbs_entries_max"` +} + +func fleecingAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "enabled": types.BoolType, + "storage": types.StringType, + } +} + +func performanceAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "max_workers": types.Int64Type, + "pbs_entries_max": types.Int64Type, + } +} + +// int64PtrToIntPtr converts *int64 to *int for API fields. +func int64PtrToIntPtr(v *int64) *int { + if v == nil { + return nil + } + + i := int(*v) + + return &i +} + +// intPtrToInt64Ptr converts *int to *int64 for Terraform state. +func intPtrToInt64Ptr(v *int) *int64 { + if v == nil { + return nil + } + + i := int64(*v) + + return &i +} + +func (m *backupJobModel) toCreateAPI(ctx context.Context, diags *diag.Diagnostics) *backup.CreateRequestBody { + body := &backup.CreateRequestBody{} + + body.ID = m.ID.ValueString() + body.Schedule = m.Schedule.ValueString() + body.Storage = m.Storage.ValueString() + + m.fillCommonFields(ctx, &body.RequestBodyCommon, diags) + + return body +} + +func (m *backupJobModel) toUpdateAPI( + ctx context.Context, + state *backupJobModel, + diags *diag.Diagnostics, +) *backup.UpdateRequestBody { + body := &backup.UpdateRequestBody{} + + body.Schedule = m.Schedule.ValueStringPointer() + body.Storage = m.Storage.ValueStringPointer() + + m.fillCommonFields(ctx, &body.RequestBodyCommon, diags) + + var toDelete []string + + attribute.CheckDelete(m.Node, state.Node, &toDelete, "node") + attribute.CheckDelete(m.VMIDs, state.VMIDs, &toDelete, "vmid") + + // Also clear vmid when transitioning from non-empty to empty list + if !m.VMIDs.IsNull() && !m.VMIDs.IsUnknown() && len(m.VMIDs.Elements()) == 0 && + !state.VMIDs.IsNull() && len(state.VMIDs.Elements()) > 0 { + 
toDelete = append(toDelete, "vmid") + } + + attribute.CheckDelete(m.Mode, state.Mode, &toDelete, "mode") + attribute.CheckDelete(m.Compress, state.Compress, &toDelete, "compress") + attribute.CheckDelete(m.StartTime, state.StartTime, &toDelete, "starttime") + attribute.CheckDelete(m.MaxFiles, state.MaxFiles, &toDelete, "maxfiles") + attribute.CheckDelete(m.MailTo, state.MailTo, &toDelete, "mailto") + attribute.CheckDelete(m.MailNotification, state.MailNotification, &toDelete, "mailnotification") + attribute.CheckDelete(m.BwLimit, state.BwLimit, &toDelete, "bwlimit") + attribute.CheckDelete(m.IONice, state.IONice, &toDelete, "ionice") + attribute.CheckDelete(m.Pigz, state.Pigz, &toDelete, "pigz") + attribute.CheckDelete(m.Zstd, state.Zstd, &toDelete, "zstd") + attribute.CheckDelete(m.PruneBackups, state.PruneBackups, &toDelete, "prune-backups") + attribute.CheckDelete(m.Remove, state.Remove, &toDelete, "remove") + attribute.CheckDelete(m.NotesTemplate, state.NotesTemplate, &toDelete, "notes-template") + attribute.CheckDelete(m.Protected, state.Protected, &toDelete, "protected") + attribute.CheckDelete(m.RepeatMissed, state.RepeatMissed, &toDelete, "repeat-missed") + attribute.CheckDelete(m.Script, state.Script, &toDelete, "script") + attribute.CheckDelete(m.StdExcludes, state.StdExcludes, &toDelete, "stdexcludes") + attribute.CheckDelete(m.ExcludePath, state.ExcludePath, &toDelete, "exclude-path") + + // Also clear exclude-path when transitioning from non-empty to empty list + // (CheckDelete only detects null, not empty list) + if !m.ExcludePath.IsNull() && !m.ExcludePath.IsUnknown() && len(m.ExcludePath.Elements()) == 0 && + !state.ExcludePath.IsNull() && len(state.ExcludePath.Elements()) > 0 { + toDelete = append(toDelete, "exclude-path") + } + + attribute.CheckDelete(m.Pool, state.Pool, &toDelete, "pool") + attribute.CheckDelete(m.Fleecing, state.Fleecing, &toDelete, "fleecing") + attribute.CheckDelete(m.Performance, state.Performance, &toDelete, "performance") 
+ attribute.CheckDelete(m.PBSChangeDetectionMode, state.PBSChangeDetectionMode, &toDelete, "pbs-change-detection-mode") + attribute.CheckDelete(m.LockWait, state.LockWait, &toDelete, "lockwait") + attribute.CheckDelete(m.StopWait, state.StopWait, &toDelete, "stopwait") + attribute.CheckDelete(m.TmpDir, state.TmpDir, &toDelete, "tmpdir") + attribute.CheckDelete(m.Enabled, state.Enabled, &toDelete, "enabled") + attribute.CheckDelete(m.All, state.All, &toDelete, "all") + + if len(toDelete) > 0 { + body.Delete = toDelete + } + + return body +} + +func (m *backupJobModel) fillCommonFields( + ctx context.Context, + common *backup.RequestBodyCommon, + diags *diag.Diagnostics, +) { + common.Enabled = attribute.CustomBoolPtrFromValue(m.Enabled) + common.Node = attribute.StringPtrFromValue(m.Node) + common.All = attribute.CustomBoolPtrFromValue(m.All) + common.Mode = attribute.StringPtrFromValue(m.Mode) + common.Compress = attribute.StringPtrFromValue(m.Compress) + common.StartTime = attribute.StringPtrFromValue(m.StartTime) + common.MaxFiles = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.MaxFiles)) + common.MailTo = attribute.StringPtrFromValue(m.MailTo) + common.MailNotification = attribute.StringPtrFromValue(m.MailNotification) + common.BwLimit = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.BwLimit)) + common.IONice = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.IONice)) + common.Pigz = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.Pigz)) + common.Zstd = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.Zstd)) + common.PruneBackups = attribute.StringPtrFromValue(m.PruneBackups) + common.Remove = attribute.CustomBoolPtrFromValue(m.Remove) + common.NotesTemplate = attribute.StringPtrFromValue(m.NotesTemplate) + common.Protected = attribute.CustomBoolPtrFromValue(m.Protected) + common.RepeatMissed = attribute.CustomBoolPtrFromValue(m.RepeatMissed) + common.Script = attribute.StringPtrFromValue(m.Script) + common.StdExcludes = 
attribute.CustomBoolPtrFromValue(m.StdExcludes) + common.Pool = attribute.StringPtrFromValue(m.Pool) + common.PBSChangeDetectionMode = attribute.StringPtrFromValue(m.PBSChangeDetectionMode) + common.LockWait = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.LockWait)) + common.StopWait = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.StopWait)) + common.TmpDir = attribute.StringPtrFromValue(m.TmpDir) + + // VMID: convert types.List to comma-separated string + if !m.VMIDs.IsNull() && !m.VMIDs.IsUnknown() { + var vmids []string + + d := m.VMIDs.ElementsAs(ctx, &vmids, false) + diags.Append(d...) + + if !d.HasError() && len(vmids) > 0 { + vmidStr := strings.Join(vmids, ",") + common.VMID = &vmidStr + } + } + + // ExcludePath: convert types.List to comma-separated string + if !m.ExcludePath.IsNull() && !m.ExcludePath.IsUnknown() { + var paths []string + + d := m.ExcludePath.ElementsAs(ctx, &paths, false) + diags.Append(d...) + + if !d.HasError() && len(paths) > 0 { + excludeStr := strings.Join(paths, ",") + common.ExcludePath = &excludeStr + } + } + + // Fleecing: extract nested object + if !m.Fleecing.IsNull() && !m.Fleecing.IsUnknown() { + var fleecing fleecingModel + + d := m.Fleecing.As(ctx, &fleecing, basetypes.ObjectAsOptions{}) + diags.Append(d...) + + if !d.HasError() { + common.Fleecing = &backup.FleecingConfig{ + Enabled: proxmoxtypes.CustomBoolPtr(fleecing.Enabled.ValueBoolPointer()), + Storage: fleecing.Storage.ValueStringPointer(), + } + } + } + + // Performance: extract nested object + if !m.Performance.IsNull() && !m.Performance.IsUnknown() { + var perf performanceModel + + d := m.Performance.As(ctx, &perf, basetypes.ObjectAsOptions{}) + diags.Append(d...) 
+ + if !d.HasError() { + common.Performance = &backup.PerformanceConfig{ + MaxWorkers: int64PtrToIntPtr(perf.MaxWorkers.ValueInt64Pointer()), + PBSEntriesMax: int64PtrToIntPtr(perf.PBSEntriesMax.ValueInt64Pointer()), + } + } + } +} + +func (m *backupJobModel) fromAPI( + ctx context.Context, + data *backup.GetResponseData, +) diag.Diagnostics { + var diags diag.Diagnostics + + m.ID = types.StringValue(data.ID) + m.Schedule = types.StringValue(data.Schedule) + m.Storage = types.StringValue(data.Storage) + m.Enabled = types.BoolPointerValue(data.Enabled.PointerBool()) + m.Node = types.StringPointerValue(data.Node) + m.All = types.BoolPointerValue(data.All.PointerBool()) + + // VMID: convert comma-separated string to list + if data.VMID != nil && *data.VMID != "" { + ids := strings.Split(*data.VMID, ",") + vmidValues := make([]attr.Value, len(ids)) + + for i, id := range ids { + vmidValues[i] = types.StringValue(strings.TrimSpace(id)) + } + + m.VMIDs, _ = types.ListValue(types.StringType, vmidValues) + } else { + m.VMIDs = types.ListNull(types.StringType) + } + + m.Mode = types.StringPointerValue(data.Mode) + m.Compress = types.StringPointerValue(data.Compress) + m.StartTime = types.StringPointerValue(data.StartTime) + m.MaxFiles = types.Int64PointerValue(intPtrToInt64Ptr(data.MaxFiles)) + m.MailTo = types.StringPointerValue(data.MailTo) + m.MailNotification = types.StringPointerValue(data.MailNotification) + m.BwLimit = types.Int64PointerValue(intPtrToInt64Ptr(data.BwLimit)) + m.IONice = types.Int64PointerValue(intPtrToInt64Ptr(data.IONice)) + m.Pigz = types.Int64PointerValue(intPtrToInt64Ptr(data.Pigz)) + m.Zstd = types.Int64PointerValue(intPtrToInt64Ptr(data.Zstd)) + m.Remove = types.BoolPointerValue(data.Remove.PointerBool()) + m.NotesTemplate = types.StringPointerValue(data.NotesTemplate) + m.Protected = types.BoolPointerValue(data.Protected.PointerBool()) + m.RepeatMissed = types.BoolPointerValue(data.RepeatMissed.PointerBool()) + m.Script = 
types.StringPointerValue(data.Script) + m.StdExcludes = types.BoolPointerValue(data.StdExcludes.PointerBool()) + m.Pool = types.StringPointerValue(data.Pool) + m.PBSChangeDetectionMode = types.StringPointerValue(data.PBSChangeDetectionMode) + m.LockWait = types.Int64PointerValue(intPtrToInt64Ptr(data.LockWait)) + m.StopWait = types.Int64PointerValue(intPtrToInt64Ptr(data.StopWait)) + m.TmpDir = types.StringPointerValue(data.TmpDir) + + // PruneBackups + if data.PruneBackups != nil { + m.PruneBackups = types.StringPointerValue(data.PruneBackups.Pointer()) + } else { + m.PruneBackups = types.StringNull() + } + + // ExcludePath: convert CustomCommaSeparatedList to types.List + if data.ExcludePath != nil { + paths := make([]attr.Value, len(*data.ExcludePath)) + for i, p := range *data.ExcludePath { + paths[i] = types.StringValue(p) + } + + listVal, d := types.ListValue(types.StringType, paths) + diags.Append(d...) + + m.ExcludePath = listVal + } else { + m.ExcludePath = types.ListNull(types.StringType) + } + + // Fleecing: convert to types.Object + if data.Fleecing != nil { + fleecingVal := fleecingModel{ + Enabled: types.BoolPointerValue(data.Fleecing.Enabled.PointerBool()), + Storage: types.StringPointerValue(data.Fleecing.Storage), + } + + obj, d := types.ObjectValueFrom(ctx, fleecingAttrTypes(), fleecingVal) + diags.Append(d...) + + m.Fleecing = obj + } else { + m.Fleecing = types.ObjectNull(fleecingAttrTypes()) + } + + // Performance: convert to types.Object + if data.Performance != nil { + perfVal := performanceModel{ + MaxWorkers: types.Int64PointerValue(intPtrToInt64Ptr(data.Performance.MaxWorkers)), + PBSEntriesMax: types.Int64PointerValue(intPtrToInt64Ptr(data.Performance.PBSEntriesMax)), + } + + obj, d := types.ObjectValueFrom(ctx, performanceAttrTypes(), perfVal) + diags.Append(d...) 
+ + m.Performance = obj + } else { + m.Performance = types.ObjectNull(performanceAttrTypes()) + } + + return diags +} + +// backupJobDatasourceModel is a simplified model for the backup job data source. +type backupJobDatasourceModel struct { + ID types.String `tfsdk:"id"` + Schedule types.String `tfsdk:"schedule"` + Storage types.String `tfsdk:"storage"` + Enabled types.Bool `tfsdk:"enabled"` + Node types.String `tfsdk:"node"` + VMIDs types.List `tfsdk:"vmid"` + All types.Bool `tfsdk:"all"` + Mode types.String `tfsdk:"mode"` + Compress types.String `tfsdk:"compress"` + MailTo types.String `tfsdk:"mailto"` + MailNotification types.String `tfsdk:"mailnotification"` + NotesTemplate types.String `tfsdk:"notes_template"` + Pool types.String `tfsdk:"pool"` + PruneBackups types.String `tfsdk:"prune_backups"` + Protected types.Bool `tfsdk:"protected"` +} + +func (m *backupJobDatasourceModel) fromAPI(data *backup.GetResponseData) { + m.ID = types.StringValue(data.ID) + m.Schedule = types.StringValue(data.Schedule) + m.Storage = types.StringValue(data.Storage) + m.Enabled = types.BoolPointerValue(data.Enabled.PointerBool()) + m.Node = types.StringPointerValue(data.Node) + m.All = types.BoolPointerValue(data.All.PointerBool()) + m.Mode = types.StringPointerValue(data.Mode) + m.Compress = types.StringPointerValue(data.Compress) + m.MailTo = types.StringPointerValue(data.MailTo) + m.MailNotification = types.StringPointerValue(data.MailNotification) + m.NotesTemplate = types.StringPointerValue(data.NotesTemplate) + m.Pool = types.StringPointerValue(data.Pool) + m.Protected = types.BoolPointerValue(data.Protected.PointerBool()) + + // VMID: convert comma-separated string to list + if data.VMID != nil && *data.VMID != "" { + ids := strings.Split(*data.VMID, ",") + vmidValues := make([]attr.Value, len(ids)) + + for i, id := range ids { + vmidValues[i] = types.StringValue(strings.TrimSpace(id)) + } + + m.VMIDs, _ = types.ListValue(types.StringType, vmidValues) + } else { + m.VMIDs 
= types.ListNull(types.StringType) + } + + if data.PruneBackups != nil { + m.PruneBackups = types.StringPointerValue(data.PruneBackups.Pointer()) + } else { + m.PruneBackups = types.StringNull() + } +} diff --git a/fwprovider/cluster/backup/resource.go b/fwprovider/cluster/backup/resource.go new file mode 100644 index 000000000..4d37273e2 --- /dev/null +++ b/fwprovider/cluster/backup/resource.go @@ -0,0 +1,409 @@ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. + */ + +package backup + +import ( + "context" + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/bpg/terraform-provider-proxmox/fwprovider/config" + "github.com/bpg/terraform-provider-proxmox/proxmox/api" + "github.com/bpg/terraform-provider-proxmox/proxmox/cluster/backup" +) + +var ( + _ resource.Resource = &backupJobResource{} + _ resource.ResourceWithConfigure = &backupJobResource{} + _ resource.ResourceWithImportState = &backupJobResource{} +) + +type backupJobResource struct { + client *backup.Client +} + +// NewResource creates a new backup job resource. 
+func NewResource() resource.Resource { + return &backupJobResource{} +} + +func (r *backupJobResource) Metadata( + _ context.Context, + _ resource.MetadataRequest, + resp *resource.MetadataResponse, +) { + resp.TypeName = "proxmox_backup_job" +} + +func (r *backupJobResource) Configure( + _ context.Context, + req resource.ConfigureRequest, + resp *resource.ConfigureResponse, +) { + if req.ProviderData == nil { + return + } + + cfg, ok := req.ProviderData.(config.Resource) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected config.Resource, got: %T", req.ProviderData), + ) + + return + } + + r.client = cfg.Client.Cluster().Backup() +} + +func (r *backupJobResource) Schema( + _ context.Context, + _ resource.SchemaRequest, + resp *resource.SchemaResponse, +) { + resp.Schema = schema.Schema{ + Description: "Manages a Proxmox VE cluster backup job.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "The identifier of the backup job.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "schedule": schema.StringAttribute{ + Description: "Backup schedule in cron format or systemd calendar event.", + Required: true, + }, + "storage": schema.StringAttribute{ + Description: "The storage identifier for the backup.", + Required: true, + }, + "enabled": schema.BoolAttribute{ + Description: "Whether the backup job is enabled.", + Optional: true, + Computed: true, + }, + "node": schema.StringAttribute{ + Description: "The cluster node name to limit the backup job to.", + Optional: true, + }, + "vmid": schema.ListAttribute{ + Description: "A list of guest VM/CT IDs to include in the backup job.", + Optional: true, + ElementType: types.StringType, + }, + "all": schema.BoolAttribute{ + Description: "Whether to back up all known guests on the node.", + Optional: true, + Computed: true, + }, + "mode": schema.StringAttribute{ + 
 Description: "The backup mode (snapshot, suspend, or stop).",
+ Optional: true,
+ Computed: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("snapshot", "suspend", "stop"),
+ },
+ },
+ "compress": schema.StringAttribute{
+ Description: "The compression algorithm (0, 1, gzip, lzo, or zstd).",
+ Optional: true,
+ Computed: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("0", "1", "gzip", "lzo", "zstd"),
+ },
+ },
+ "starttime": schema.StringAttribute{
+ Description: "The scheduled start time (HH:MM).",
+ Optional: true,
+ },
+ "maxfiles": schema.Int64Attribute{
+ Description: "Deprecated: use prune_backups instead. Maximum number of backup files per guest.",
+ Optional: true,
+ },
+ "mailto": schema.StringAttribute{
+ Description: "A comma-separated list of email addresses to send notifications to.",
+ Optional: true,
+ },
+ "mailnotification": schema.StringAttribute{
+ Description: "Email notification setting (always or failure).",
+ Optional: true,
+ Computed: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("always", "failure"),
+ },
+ },
+ "bwlimit": schema.Int64Attribute{
+ Description: "I/O bandwidth limit in KiB/s.",
+ Optional: true,
+ Computed: true,
+ },
+ "ionice": schema.Int64Attribute{
+ Description: "I/O priority (0-8).",
+ Optional: true,
+ Computed: true,
+ Validators: []validator.Int64{
+ int64validator.Between(0, 8),
+ },
+ },
+ "pigz": schema.Int64Attribute{
+ Description: "Number of pigz threads (0 disables, 1 uses single-threaded gzip).",
+ Optional: true,
+ },
+ "zstd": schema.Int64Attribute{
+ Description: "Number of zstd threads (0 uses half of available cores).",
+ Optional: true,
+ },
+ "prune_backups": schema.StringAttribute{
+ Description: "Retention options as a comma-separated list of key=value pairs " +
+ "(e.g. 
keep-last=3,keep-weekly=2).", + Optional: true, + Computed: true, + }, + "remove": schema.BoolAttribute{ + Description: "Whether to remove old backups if there are more than maxfiles.", + Optional: true, + Computed: true, + }, + "notes_template": schema.StringAttribute{ + Description: "Template for notes attached to the backup.", + Optional: true, + }, + "protected": schema.BoolAttribute{ + Description: "Whether the backup should be marked as protected.", + Optional: true, + Computed: true, + }, + "repeat_missed": schema.BoolAttribute{ + Description: "Whether to repeat missed backup jobs as soon as possible.", + Optional: true, + Computed: true, + }, + "script": schema.StringAttribute{ + Description: "Path to a script to execute before/after the backup job.", + Optional: true, + }, + "stdexcludes": schema.BoolAttribute{ + Description: "Whether to exclude common temporary files from the backup.", + Optional: true, + Computed: true, + }, + "exclude_path": schema.ListAttribute{ + Description: "A list of paths to exclude from the backup.", + Optional: true, + ElementType: types.StringType, + }, + "pool": schema.StringAttribute{ + Description: "Limit backup to guests in the specified pool.", + Optional: true, + }, + "fleecing": schema.SingleNestedAttribute{ + Description: "Fleecing configuration for the backup job.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Description: "Whether fleecing is enabled.", + Optional: true, + Computed: true, + }, + "storage": schema.StringAttribute{ + Description: "The storage identifier for fleecing.", + Optional: true, + }, + }, + }, + "performance": schema.SingleNestedAttribute{ + Description: "Performance-related settings for the backup job.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "max_workers": schema.Int64Attribute{ + Description: "Maximum number of workers for parallel backup.", + Optional: true, + }, + "pbs_entries_max": schema.Int64Attribute{ + 
Description: "Maximum number of entries for PBS catalog.", + Optional: true, + }, + }, + }, + "pbs_change_detection_mode": schema.StringAttribute{ + Description: "PBS change detection mode (legacy, data, or metadata).", + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf("legacy", "data", "metadata"), + }, + }, + "lockwait": schema.Int64Attribute{ + Description: "Maximum wait time in minutes for the global lock.", + Optional: true, + }, + "stopwait": schema.Int64Attribute{ + Description: "Maximum wait time in minutes for a guest to stop.", + Optional: true, + }, + "tmpdir": schema.StringAttribute{ + Description: "Path to the temporary directory for the backup job.", + Optional: true, + }, + }, + } +} + +func (r *backupJobResource) Create( + ctx context.Context, + req resource.CreateRequest, + resp *resource.CreateResponse, +) { + var plan backupJobModel + + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + + if resp.Diagnostics.HasError() { + return + } + + createBody := plan.toCreateAPI(ctx, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + err := r.client.Create(ctx, createBody) + if err != nil { + resp.Diagnostics.AddError("Unable to Create Backup Job", err.Error()) + return + } + + // Read back to get server-assigned defaults. + data, err := r.client.Get(ctx, plan.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Unable to Read Backup Job After Creation", err.Error()) + return + } + + resp.Diagnostics.Append(plan.fromAPI(ctx, data)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *backupJobResource) Read( + ctx context.Context, + req resource.ReadRequest, + resp *resource.ReadResponse, +) { + var state backupJobModel + + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + data, err := r.client.Get(ctx, state.ID.ValueString()) + if err != nil { + if errors.Is(err, api.ErrResourceDoesNotExist) { + resp.State.RemoveResource(ctx) + return + } + + resp.Diagnostics.AddError("Unable to Read Backup Job", err.Error()) + + return + } + + resp.Diagnostics.Append(state.fromAPI(ctx, data)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *backupJobResource) Update( + ctx context.Context, + req resource.UpdateRequest, + resp *resource.UpdateResponse, +) { + var plan, state backupJobModel + + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + + if resp.Diagnostics.HasError() { + return + } + + updateBody := plan.toUpdateAPI(ctx, &state, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + err := r.client.Update(ctx, plan.ID.ValueString(), updateBody) + if err != nil { + resp.Diagnostics.AddError("Unable to Update Backup Job", err.Error()) + return + } + + // Read back to get server state. + data, err := r.client.Get(ctx, plan.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Unable to Read Backup Job After Update", err.Error()) + return + } + + resp.Diagnostics.Append(plan.fromAPI(ctx, data)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *backupJobResource) Delete( + ctx context.Context, + req resource.DeleteRequest, + resp *resource.DeleteResponse, +) { + var state backupJobModel + + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + err := r.client.Delete(ctx, state.ID.ValueString()) + if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) { + resp.Diagnostics.AddError("Unable to Delete Backup Job", err.Error()) + } +} + +func (r *backupJobResource) ImportState( + ctx context.Context, + req resource.ImportStateRequest, + resp *resource.ImportStateResponse, +) { + data, err := r.client.Get(ctx, req.ID) + if err != nil { + if errors.Is(err, api.ErrResourceDoesNotExist) { + resp.Diagnostics.AddError("Backup Job Not Found", + fmt.Sprintf("Backup job with ID '%s' was not found", req.ID)) + + return + } + + resp.Diagnostics.AddError("Unable to Import Backup Job", err.Error()) + + return + } + + var model backupJobModel + + resp.Diagnostics.Append(model.fromAPI(ctx, data)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &model)...) +} diff --git a/fwprovider/cluster/backup/resource_test.go b/fwprovider/cluster/backup/resource_test.go new file mode 100644 index 000000000..3cade4f43 --- /dev/null +++ b/fwprovider/cluster/backup/resource_test.go @@ -0,0 +1,221 @@ +//go:build acceptance || all + +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ */ + +package backup_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + + "github.com/bpg/terraform-provider-proxmox/fwprovider/test" +) + +func TestAccResourceBackupJob(t *testing.T) { + t.Parallel() + + te := test.InitEnvironment(t) + + tests := []struct { + name string + steps []resource.TestStep + }{ + {"create and update backup job", []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test" { + id = "acc-test-bj" + schedule = "*-*-* 02:00" + storage = "local" + all = true + mode = "snapshot" + compress = "zstd" + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test", "id", "acc-test-bj"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "schedule", "*-*-* 02:00"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "storage", "local"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "all", "true"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "mode", "snapshot"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "compress", "zstd"), + ), + }, + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test" { + id = "acc-test-bj" + schedule = "*-*-* 03:00" + storage = "local" + all = true + mode = "stop" + compress = "lzo" + enabled = false + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test", "id", "acc-test-bj"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "schedule", "*-*-* 03:00"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "mode", "stop"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "compress", "lzo"), + resource.TestCheckResourceAttr("proxmox_backup_job.test", "enabled", "false"), + ), + }, + { + ResourceName: "proxmox_backup_job.test", + ImportStateId: "acc-test-bj", + ImportState: true, + ImportStateVerify: true, + }, + }}, + 
{"create with minimal attributes", []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_minimal" { + id = "acc-test-min" + schedule = "sun 01:00" + storage = "local" + all = true + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_minimal", "id", "acc-test-min"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_minimal", "storage", "local"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_minimal", "all", "true"), + resource.TestCheckResourceAttrSet("proxmox_backup_job.test_minimal", "enabled"), + ), + }, + }}, + {"field deletion", []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_delete" { + id = "acc-test-del" + schedule = "*-*-* 04:00" + storage = "local" + all = true + mode = "snapshot" + compress = "zstd" + mailto = "test@example.com" + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "id", "acc-test-del"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "mode", "snapshot"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "compress", "zstd"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "mailto", "test@example.com"), + ), + }, + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_delete" { + id = "acc-test-del" + schedule = "*-*-* 04:00" + storage = "local" + all = true + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "id", "acc-test-del"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "all", "true"), + test.NoResourceAttributesSet("proxmox_backup_job.test_delete", []string{ + "mailto", + }), + ), + }, + }}, + {"backup specific VMs by ID", []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_vmid" { + id = "acc-test-vmid" + schedule = "*-*-* 06:00" + 
storage = "local" + vmid = ["100", "101", "102"] + mode = "snapshot" + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "id", "acc-test-vmid"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "vmid.#", "3"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "vmid.0", "100"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "vmid.1", "101"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "vmid.2", "102"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "mode", "snapshot"), + ), + }, + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_vmid" { + id = "acc-test-vmid" + schedule = "*-*-* 06:00" + storage = "local" + vmid = ["100", "200"] + mode = "stop" + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "vmid.#", "2"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "vmid.0", "100"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "vmid.1", "200"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_vmid", "mode", "stop"), + ), + }, + { + ResourceName: "proxmox_backup_job.test_vmid", + ImportStateId: "acc-test-vmid", + ImportState: true, + ImportStateVerify: true, + }, + }}, + {"backup with retention policy", []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_retention" { + id = "acc-test-ret" + schedule = "*-*-* 07:00" + storage = "local" + all = true + prune_backups = "keep-daily=7,keep-last=3" + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_retention", "id", "acc-test-ret"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_retention", "prune_backups", "keep-daily=7,keep-last=3"), + ), + }, + }}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resource.ParallelTest(t, 
resource.TestCase{ + ProtoV6ProviderFactories: te.AccProviders, + Steps: tt.steps, + }) + }) + } +} + +func TestAccDataSourceBackupJobs(t *testing.T) { + te := test.InitEnvironment(t) + + resource.ParallelTest(t, resource.TestCase{ + ProtoV6ProviderFactories: te.AccProviders, + Steps: []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_ds" { + id = "acc-test-ds" + schedule = "*-*-* 05:00" + storage = "local" + all = true + } + + data "proxmox_backup_jobs" "all" { + depends_on = [proxmox_backup_job.test_ds] + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.proxmox_backup_jobs.all", "jobs.#"), + ), + }, + }, + }) +} diff --git a/fwprovider/provider.go b/fwprovider/provider.go index 03118b548..396dd18e1 100644 --- a/fwprovider/provider.go +++ b/fwprovider/provider.go @@ -26,6 +26,7 @@ import ( "github.com/bpg/terraform-provider-proxmox/fwprovider/access" "github.com/bpg/terraform-provider-proxmox/fwprovider/cluster/acme" + "github.com/bpg/terraform-provider-proxmox/fwprovider/cluster/backup" "github.com/bpg/terraform-provider-proxmox/fwprovider/cluster/ha" "github.com/bpg/terraform-provider-proxmox/fwprovider/cluster/hardwaremapping" "github.com/bpg/terraform-provider-proxmox/fwprovider/cluster/metrics" @@ -533,6 +534,7 @@ func (p *proxmoxProvider) Resources(_ context.Context) []func() resource.Resourc acme.NewACMEPluginResource, apt.NewRepositoryResource, apt.NewStandardRepositoryResource, + backup.NewResource, clonedvm.NewResource, ha.NewHAGroupResource, ha.NewHAResourceResource, @@ -581,6 +583,7 @@ func (p *proxmoxProvider) DataSources(_ context.Context) []func() datasource.Dat acme.NewACMEPluginsDataSource, apt.NewRepositoryDataSource, apt.NewStandardRepositoryDataSource, + backup.NewDataSource, datastores.NewDataSource, ha.NewHAGroupDataSource, ha.NewHAGroupsDataSource, diff --git a/main.go 
b/main.go index 39375f604..f35896c90 100644 --- a/main.go +++ b/main.go @@ -41,6 +41,7 @@ import ( //go:generate cp ./build/docs-gen/data-sources/virtual_environment_apt_repository.md ./docs/data-sources/ //go:generate cp ./build/docs-gen/data-sources/virtual_environment_apt_standard_repository.md ./docs/data-sources/ //go:generate cp ./build/docs-gen/data-sources/virtual_environment_datastores.md ./docs/data-sources/ +//go:generate cp ./build/docs-gen/data-sources/backup_jobs.md ./docs/data-sources/ //go:generate cp ./build/docs-gen/data-sources/virtual_environment_file.md ./docs/data-sources/ //go:generate cp ./build/docs-gen/data-sources/files.md ./docs/data-sources/ //go:generate cp ./build/docs-gen/data-sources/virtual_environment_hagroup.md ./docs/data-sources/ @@ -65,6 +66,7 @@ import ( //go:generate cp ./build/docs-gen/data-sources/virtual_environment_sdn_zones.md ./docs/data-sources/ //go:generate cp ./build/docs-gen/data-sources/virtual_environment_version.md ./docs/data-sources/ //go:generate cp ./build/docs-gen/data-sources/virtual_environment_vm2.md ./docs/data-sources/ +//go:generate cp ./build/docs-gen/resources/backup_job.md ./docs/resources/ //go:generate cp ./build/docs-gen/resources/virtual_environment_acl.md ./docs/resources/ //go:generate cp ./build/docs-gen/resources/virtual_environment_acme_account.md ./docs/resources/ //go:generate cp ./build/docs-gen/resources/virtual_environment_acme_dns_plugin.md ./docs/resources/ diff --git a/proxmox/cluster/backup/backup.go b/proxmox/cluster/backup/backup.go new file mode 100644 index 000000000..66524ee73 --- /dev/null +++ b/proxmox/cluster/backup/backup.go @@ -0,0 +1,83 @@ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ */ + +package backup + +import ( + "context" + "fmt" + "net/http" + "net/url" + "sort" + + "github.com/bpg/terraform-provider-proxmox/proxmox/api" +) + +// List retrieves all backup jobs, sorted by ID. +func (c *Client) List(ctx context.Context) ([]*GetResponseData, error) { + resBody := &ListResponseBody{} + + err := c.DoRequest(ctx, http.MethodGet, c.ExpandPath(""), nil, resBody) + if err != nil { + return nil, fmt.Errorf("error listing backup jobs: %w", err) + } + + if resBody.Data == nil { + return nil, api.ErrNoDataObjectInResponse + } + + sort.Slice(resBody.Data, func(i, j int) bool { + return resBody.Data[i].ID < resBody.Data[j].ID + }) + + return resBody.Data, nil +} + +// Get retrieves a single backup job by ID. +func (c *Client) Get(ctx context.Context, id string) (*GetResponseData, error) { + resBody := &GetResponseBody{} + + err := c.DoRequest(ctx, http.MethodGet, c.ExpandPath(url.PathEscape(id)), nil, resBody) + if err != nil { + return nil, fmt.Errorf("error reading backup job: %w", err) + } + + if resBody.Data == nil { + return nil, api.ErrNoDataObjectInResponse + } + + return resBody.Data, nil +} + +// Create creates a new backup job. +func (c *Client) Create(ctx context.Context, data *CreateRequestBody) error { + err := c.DoRequest(ctx, http.MethodPost, c.ExpandPath(""), data, nil) + if err != nil { + return fmt.Errorf("error creating backup job: %w", err) + } + + return nil +} + +// Update updates an existing backup job. +func (c *Client) Update(ctx context.Context, id string, data *UpdateRequestBody) error { + err := c.DoRequest(ctx, http.MethodPut, c.ExpandPath(url.PathEscape(id)), data, nil) + if err != nil { + return fmt.Errorf("error updating backup job: %w", err) + } + + return nil +} + +// Delete deletes a backup job. 
+func (c *Client) Delete(ctx context.Context, id string) error { + err := c.DoRequest(ctx, http.MethodDelete, c.ExpandPath(url.PathEscape(id)), nil, nil) + if err != nil { + return fmt.Errorf("error deleting backup job: %w", err) + } + + return nil +} diff --git a/proxmox/cluster/backup/backup_types.go b/proxmox/cluster/backup/backup_types.go new file mode 100644 index 000000000..ce3dbd477 --- /dev/null +++ b/proxmox/cluster/backup/backup_types.go @@ -0,0 +1,239 @@ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. + */ + +package backup + +import ( + "encoding/json" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/bpg/terraform-provider-proxmox/proxmox/types" +) + +// PruneBackupsString is a custom string type that handles the Proxmox API returning +// prune-backups as either a plain string (e.g. "keep-last=2,keep-weekly=1") or a JSON object +// (e.g. {"keep-last":2,"keep-weekly":"1"}). It always stores the value as a comma-separated string. +type PruneBackupsString string + +// UnmarshalJSON handles string, object with int values, and object with string values. +func (p *PruneBackupsString) UnmarshalJSON(data []byte) error { + // Try string first. + var s string + if err := json.Unmarshal(data, &s); err == nil { + *p = PruneBackupsString(s) + return nil + } + + // Try object with varying value types (int or string). + var obj map[string]json.RawMessage + if err := json.Unmarshal(data, &obj); err != nil { + return fmt.Errorf("failed to unmarshal PruneBackupsString: %w", err) + } + + keys := make([]string, 0, len(obj)) + for k := range obj { + keys = append(keys, k) + } + + sort.Strings(keys) + + parts := make([]string, 0, len(obj)) + + for _, k := range keys { + raw := obj[k] + + // Try as integer. 
+ var intVal int + if err := json.Unmarshal(raw, &intVal); err == nil { + parts = append(parts, fmt.Sprintf("%s=%d", k, intVal)) + continue + } + + // Try as string. + var strVal string + if err := json.Unmarshal(raw, &strVal); err == nil { + parts = append(parts, fmt.Sprintf("%s=%s", k, strVal)) + continue + } + + return fmt.Errorf("failed to unmarshal PruneBackupsString value for key %q", k) + } + + *p = PruneBackupsString(strings.Join(parts, ",")) + + return nil +} + +// String returns the string representation. +func (p PruneBackupsString) String() string { + return string(p) +} + +// Pointer returns a pointer to the underlying string value, or nil if empty. +func (p PruneBackupsString) Pointer() *string { + if p == "" { + return nil + } + + s := string(p) + + return &s +} + +// ListResponseBody contains the body from a backup job list response. +type ListResponseBody struct { + Data []*GetResponseData `json:"data,omitempty"` +} + +// GetResponseBody contains the body from a backup job get response. +type GetResponseBody struct { + Data *GetResponseData `json:"data,omitempty"` +} + +// GetResponseData contains the data from a backup job get response. 
+type GetResponseData struct { + ID string `json:"id"` + Type *string `json:"type,omitempty"` + Enabled *types.CustomBool `json:"enabled,omitempty"` + Schedule string `json:"schedule"` + Storage string `json:"storage"` + Node *string `json:"node,omitempty"` + VMID *string `json:"vmid,omitempty"` + All *types.CustomBool `json:"all,omitempty"` + Mode *string `json:"mode,omitempty"` + Compress *string `json:"compress,omitempty"` + StartTime *string `json:"starttime,omitempty"` + MaxFiles *int `json:"maxfiles,omitempty"` + MailTo *string `json:"mailto,omitempty"` + MailNotification *string `json:"mailnotification,omitempty"` + BwLimit *int `json:"bwlimit,omitempty"` + IONice *int `json:"ionice,omitempty"` + Pigz *int `json:"pigz,omitempty"` + Zstd *int `json:"zstd,omitempty"` + PruneBackups *PruneBackupsString `json:"prune-backups,omitempty"` + Remove *types.CustomBool `json:"remove,omitempty"` + NotesTemplate *string `json:"notes-template,omitempty"` + Protected *types.CustomBool `json:"protected,omitempty"` + RepeatMissed *types.CustomBool `json:"repeat-missed,omitempty"` + Script *string `json:"script,omitempty"` + StdExcludes *types.CustomBool `json:"stdexcludes,omitempty"` + ExcludePath *types.CustomCommaSeparatedList `json:"exclude-path,omitempty"` + Pool *string `json:"pool,omitempty"` + Fleecing *FleecingConfig `json:"fleecing,omitempty"` + Performance *PerformanceConfig `json:"performance,omitempty"` + PBSChangeDetectionMode *string `json:"pbs-change-detection-mode,omitempty"` + LockWait *int `json:"lockwait,omitempty"` + StopWait *int `json:"stopwait,omitempty"` + TmpDir *string `json:"tmpdir,omitempty"` +} + +// RequestBodyCommon contains common fields for backup job create and update requests. 
+type RequestBodyCommon struct { + Enabled *types.CustomBool `json:"enabled,omitempty" url:"enabled,omitempty,int"` + Node *string `json:"node,omitempty" url:"node,omitempty"` + VMID *string `json:"vmid,omitempty" url:"vmid,omitempty"` + All *types.CustomBool `json:"all,omitempty" url:"all,omitempty,int"` + Mode *string `json:"mode,omitempty" url:"mode,omitempty"` + Compress *string `json:"compress,omitempty" url:"compress,omitempty"` + StartTime *string `json:"starttime,omitempty" url:"starttime,omitempty"` + MaxFiles *int `json:"maxfiles,omitempty" url:"maxfiles,omitempty"` + MailTo *string `json:"mailto,omitempty" url:"mailto,omitempty"` + MailNotification *string `json:"mailnotification,omitempty" url:"mailnotification,omitempty"` + BwLimit *int `json:"bwlimit,omitempty" url:"bwlimit,omitempty"` + IONice *int `json:"ionice,omitempty" url:"ionice,omitempty"` + Pigz *int `json:"pigz,omitempty" url:"pigz,omitempty"` + Zstd *int `json:"zstd,omitempty" url:"zstd,omitempty"` + PruneBackups *string `json:"prune-backups,omitempty" url:"prune-backups,omitempty"` + Remove *types.CustomBool `json:"remove,omitempty" url:"remove,omitempty,int"` + NotesTemplate *string `json:"notes-template,omitempty" url:"notes-template,omitempty"` + Protected *types.CustomBool `json:"protected,omitempty" url:"protected,omitempty,int"` + RepeatMissed *types.CustomBool `json:"repeat-missed,omitempty" url:"repeat-missed,omitempty,int"` + Script *string `json:"script,omitempty" url:"script,omitempty"` + StdExcludes *types.CustomBool `json:"stdexcludes,omitempty" url:"stdexcludes,omitempty,int"` + ExcludePath *string `json:"exclude-path,omitempty" url:"exclude-path,omitempty"` + Pool *string `json:"pool,omitempty" url:"pool,omitempty"` + Fleecing *FleecingConfig `json:"fleecing,omitempty" url:"fleecing,omitempty"` + Performance *PerformanceConfig `json:"performance,omitempty" url:"performance,omitempty"` + PBSChangeDetectionMode *string `json:"pbs-change-detection-mode,omitempty" 
url:"pbs-change-detection-mode,omitempty"` + LockWait *int `json:"lockwait,omitempty" url:"lockwait,omitempty"` + StopWait *int `json:"stopwait,omitempty" url:"stopwait,omitempty"` + TmpDir *string `json:"tmpdir,omitempty" url:"tmpdir,omitempty"` +} + +// CreateRequestBody contains the body for creating a new backup job. +type CreateRequestBody struct { + RequestBodyCommon + + ID string `json:"id" url:"id"` + Schedule string `json:"schedule" url:"schedule"` + Storage string `json:"storage" url:"storage"` +} + +// UpdateRequestBody contains the body for updating an existing backup job. +type UpdateRequestBody struct { + RequestBodyCommon + + Schedule *string `json:"schedule,omitempty" url:"schedule,omitempty"` + Storage *string `json:"storage,omitempty" url:"storage,omitempty"` + Delete []string `json:"delete,omitempty" url:"delete,omitempty,comma"` +} + +// FleecingConfig contains the fleecing configuration for a backup job. +type FleecingConfig struct { + Enabled *types.CustomBool `json:"enabled,omitempty" url:"enabled,omitempty,int"` + Storage *string `json:"storage,omitempty" url:"storage,omitempty"` +} + +// EncodeValues encodes the FleecingConfig into URL values as a comma-separated key=value string. +func (f *FleecingConfig) EncodeValues(key string, v *url.Values) error { + var parts []string + + if f.Enabled != nil { + if *f.Enabled { + parts = append(parts, "enabled=1") + } else { + parts = append(parts, "enabled=0") + } + } + + if f.Storage != nil { + parts = append(parts, fmt.Sprintf("storage=%s", *f.Storage)) + } + + if len(parts) > 0 { + v.Add(key, strings.Join(parts, ",")) + } + + return nil +} + +// PerformanceConfig contains the performance configuration for a backup job. 
+type PerformanceConfig struct { + MaxWorkers *int `json:"max-workers,omitempty" url:"max-workers,omitempty"` + PBSEntriesMax *int `json:"pbs-entries-max,omitempty" url:"pbs-entries-max,omitempty"` +} + +// EncodeValues encodes the PerformanceConfig into URL values as a comma-separated key=value string. +func (p *PerformanceConfig) EncodeValues(key string, v *url.Values) error { + var parts []string + + if p.MaxWorkers != nil { + parts = append(parts, fmt.Sprintf("max-workers=%d", *p.MaxWorkers)) + } + + if p.PBSEntriesMax != nil { + parts = append(parts, fmt.Sprintf("pbs-entries-max=%d", *p.PBSEntriesMax)) + } + + if len(parts) > 0 { + v.Add(key, strings.Join(parts, ",")) + } + + return nil +} diff --git a/proxmox/cluster/backup/client.go b/proxmox/cluster/backup/client.go new file mode 100644 index 000000000..0a8ddf1b7 --- /dev/null +++ b/proxmox/cluster/backup/client.go @@ -0,0 +1,21 @@ +/* + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. + */ + +package backup + +import ( + "github.com/bpg/terraform-provider-proxmox/proxmox/api" +) + +// Client is an interface for accessing the Proxmox cluster backup API. +type Client struct { + api.Client +} + +// ExpandPath expands a relative path to a full cluster backup API path. 
+func (c *Client) ExpandPath(path string) string { + return c.Client.ExpandPath("backup/" + path) +} diff --git a/proxmox/cluster/client.go b/proxmox/cluster/client.go index ff60f1471..69dfb99b2 100644 --- a/proxmox/cluster/client.go +++ b/proxmox/cluster/client.go @@ -11,6 +11,7 @@ import ( "github.com/bpg/terraform-provider-proxmox/proxmox/api" "github.com/bpg/terraform-provider-proxmox/proxmox/cluster/acme" + "github.com/bpg/terraform-provider-proxmox/proxmox/cluster/backup" clusterfirewall "github.com/bpg/terraform-provider-proxmox/proxmox/cluster/firewall" "github.com/bpg/terraform-provider-proxmox/proxmox/cluster/ha" "github.com/bpg/terraform-provider-proxmox/proxmox/cluster/mapping" @@ -55,6 +56,11 @@ func (c *Client) ACME() *acme.Client { return &acme.Client{Client: c} } +// Backup returns a client for managing cluster backup jobs. +func (c *Client) Backup() *backup.Client { + return &backup.Client{Client: c} +} + // Metrics returns a client for managing the cluster's metrics features. 
func (c *Client) Metrics() *metrics.Client { return &metrics.Client{Client: c} From d2cd9a5e01b05f8fb21ea00fae29cf2d7bf75f82 Mon Sep 17 00:00:00 2001 From: Pavel Boldyrev Date: Sat, 21 Mar 2026 13:22:49 -0400 Subject: [PATCH 2/3] review feedback Signed-off-by: Pavel Boldyrev --- docs/data-sources/backup_jobs.md | 4 +- docs/resources/backup_job.md | 4 +- fwprovider/attribute/attribute.go | 10 ++ fwprovider/cluster/backup/datasource.go | 10 +- fwprovider/cluster/backup/model.go | 177 +++++++++++++++++---- fwprovider/cluster/backup/resource.go | 84 ++++++++-- fwprovider/cluster/backup/resource_test.go | 84 +++++++++- proxmox/cluster/backup/backup_types.go | 8 +- 8 files changed, 319 insertions(+), 62 deletions(-) diff --git a/docs/data-sources/backup_jobs.md b/docs/data-sources/backup_jobs.md index 633d18b3a..4212408d3 100644 --- a/docs/data-sources/backup_jobs.md +++ b/docs/data-sources/backup_jobs.md @@ -35,13 +35,13 @@ Read-Only: - `enabled` (Boolean) Indicates whether the backup job is enabled. - `id` (String) Unique identifier of the backup job. - `mailnotification` (String) When to send email notifications (always or failure). -- `mailto` (String) Comma-separated list of email addresses for notifications. +- `mailto` (List of String) List of email addresses for notifications. - `mode` (String) Backup mode (e.g. snapshot, suspend, stop). - `node` (String) Node on which the backup job runs. - `notes_template` (String) Template for backup notes. - `pool` (String) Pool whose members are backed up. - `protected` (Boolean) Indicates whether backups created by this job are protected from pruning. -- `prune_backups` (String) Prune options in the format `keep-last=N,...`. +- `prune_backups` (Map of String) Retention options as a map of keep policies. - `schedule` (String) Backup schedule in systemd calendar format. - `storage` (String) Target storage for the backup. - `vmid` (List of String) List of VM/CT IDs included in the backup job. 
diff --git a/docs/resources/backup_job.md b/docs/resources/backup_job.md index f455bfab4..0fbd6f29a 100644 --- a/docs/resources/backup_job.md +++ b/docs/resources/backup_job.md @@ -44,7 +44,7 @@ resource "proxmox_backup_job" "daily_backup" { - `ionice` (Number) I/O priority (0-8). - `lockwait` (Number) Maximum wait time in minutes for the global lock. - `mailnotification` (String) Email notification setting (always or failure). -- `mailto` (String) A comma-separated list of email addresses to send notifications to. +- `mailto` (List of String) A list of email addresses to send notifications to. - `maxfiles` (Number) Deprecated: use prune_backups instead. Maximum number of backup files per guest. - `mode` (String) The backup mode (snapshot, suspend, or stop). - `node` (String) The cluster node name to limit the backup job to. @@ -54,7 +54,7 @@ resource "proxmox_backup_job" "daily_backup" { - `pigz` (Number) Number of pigz threads (0 disables, 1 uses single-threaded gzip). - `pool` (String) Limit backup to guests in the specified pool. - `protected` (Boolean) Whether the backup should be marked as protected. -- `prune_backups` (String) Retention options as a comma-separated list of key=value pairs (e.g. keep-last=3,keep-weekly=2). +- `prune_backups` (Map of String) Retention options as a map of keep policies (e.g. keep-last = "3", keep-weekly = "2"). - `remove` (Boolean) Whether to remove old backups if there are more than maxfiles. - `repeat_missed` (Boolean) Whether to repeat missed backup jobs as soon as possible. - `script` (String) Path to a script to execute before/after the backup job. 
diff --git a/fwprovider/attribute/attribute.go b/fwprovider/attribute/attribute.go index 1b07c9169..8f461f5a6 100644 --- a/fwprovider/attribute/attribute.go +++ b/fwprovider/attribute/attribute.go @@ -84,6 +84,16 @@ func CheckDelete(planField, stateField attr.Value, toDelete *[]string, apiName s planIsEmpty = planIsEmpty || len(planSet.Elements()) == 0 } + // Special handling for types.List: treat empty list as null + if planList, ok := planField.(types.List); ok { + planIsEmpty = planIsEmpty || len(planList.Elements()) == 0 + } + + // Special handling for types.Map: treat empty map as null + if planMap, ok := planField.(types.Map); ok { + planIsEmpty = planIsEmpty || len(planMap.Elements()) == 0 + } + if planIsEmpty && !stateIsEmpty { *toDelete = append(*toDelete, apiName) } diff --git a/fwprovider/cluster/backup/datasource.go b/fwprovider/cluster/backup/datasource.go index 74b305b19..6a10ed03f 100644 --- a/fwprovider/cluster/backup/datasource.go +++ b/fwprovider/cluster/backup/datasource.go @@ -123,9 +123,10 @@ func (d *backupJobsDataSource) Schema( Description: "Compression algorithm used for the backup.", Computed: true, }, - "mailto": schema.StringAttribute{ - Description: "Comma-separated list of email addresses for notifications.", + "mailto": schema.ListAttribute{ + Description: "List of email addresses for notifications.", Computed: true, + ElementType: types.StringType, }, "mailnotification": schema.StringAttribute{ Description: "When to send email notifications (always or failure).", @@ -139,9 +140,10 @@ func (d *backupJobsDataSource) Schema( Description: "Pool whose members are backed up.", Computed: true, }, - "prune_backups": schema.StringAttribute{ - Description: "Prune options in the format `keep-last=N,...`.", + "prune_backups": schema.MapAttribute{ + Description: "Retention options as a map of keep policies.", Computed: true, + ElementType: types.StringType, }, "protected": schema.BoolAttribute{ Description: "Indicates whether backups created by 
this job are protected from pruning.", diff --git a/fwprovider/cluster/backup/model.go b/fwprovider/cluster/backup/model.go index ebb3032af..0b9ca16d5 100644 --- a/fwprovider/cluster/backup/model.go +++ b/fwprovider/cluster/backup/model.go @@ -8,6 +8,8 @@ package backup import ( "context" + "fmt" + "sort" "strings" "github.com/hashicorp/terraform-plugin-framework/attr" @@ -32,13 +34,13 @@ type backupJobModel struct { Compress types.String `tfsdk:"compress"` StartTime types.String `tfsdk:"starttime"` MaxFiles types.Int64 `tfsdk:"maxfiles"` - MailTo types.String `tfsdk:"mailto"` + MailTo types.List `tfsdk:"mailto"` MailNotification types.String `tfsdk:"mailnotification"` BwLimit types.Int64 `tfsdk:"bwlimit"` IONice types.Int64 `tfsdk:"ionice"` Pigz types.Int64 `tfsdk:"pigz"` Zstd types.Int64 `tfsdk:"zstd"` - PruneBackups types.String `tfsdk:"prune_backups"` + PruneBackups types.Map `tfsdk:"prune_backups"` Remove types.Bool `tfsdk:"remove"` NotesTemplate types.String `tfsdk:"notes_template"` Protected types.Bool `tfsdk:"protected"` @@ -101,6 +103,28 @@ func intPtrToInt64Ptr(v *int) *int64 { return &i } +// int64PtrToCustomIntPtr converts *int64 to *types.CustomInt for API fields. +func int64PtrToCustomIntPtr(v *int64) *proxmoxtypes.CustomInt { + if v == nil { + return nil + } + + i := proxmoxtypes.CustomInt(*v) + + return &i +} + +// customIntPtrToInt64Ptr converts *types.CustomInt to *int64 for Terraform state. 
+func customIntPtrToInt64Ptr(v *proxmoxtypes.CustomInt) *int64 { + if v == nil { + return nil + } + + i := int64(*v) + + return &i +} + func (m *backupJobModel) toCreateAPI(ctx context.Context, diags *diag.Diagnostics) *backup.CreateRequestBody { body := &backup.CreateRequestBody{} @@ -130,12 +154,6 @@ func (m *backupJobModel) toUpdateAPI( attribute.CheckDelete(m.Node, state.Node, &toDelete, "node") attribute.CheckDelete(m.VMIDs, state.VMIDs, &toDelete, "vmid") - // Also clear vmid when transitioning from non-empty to empty list - if !m.VMIDs.IsNull() && !m.VMIDs.IsUnknown() && len(m.VMIDs.Elements()) == 0 && - !state.VMIDs.IsNull() && len(state.VMIDs.Elements()) > 0 { - toDelete = append(toDelete, "vmid") - } - attribute.CheckDelete(m.Mode, state.Mode, &toDelete, "mode") attribute.CheckDelete(m.Compress, state.Compress, &toDelete, "compress") attribute.CheckDelete(m.StartTime, state.StartTime, &toDelete, "starttime") @@ -155,13 +173,6 @@ func (m *backupJobModel) toUpdateAPI( attribute.CheckDelete(m.StdExcludes, state.StdExcludes, &toDelete, "stdexcludes") attribute.CheckDelete(m.ExcludePath, state.ExcludePath, &toDelete, "exclude-path") - // Also clear exclude-path when transitioning from non-empty to empty list - // (CheckDelete only detects null, not empty list) - if !m.ExcludePath.IsNull() && !m.ExcludePath.IsUnknown() && len(m.ExcludePath.Elements()) == 0 && - !state.ExcludePath.IsNull() && len(state.ExcludePath.Elements()) > 0 { - toDelete = append(toDelete, "exclude-path") - } - attribute.CheckDelete(m.Pool, state.Pool, &toDelete, "pool") attribute.CheckDelete(m.Fleecing, state.Fleecing, &toDelete, "fleecing") attribute.CheckDelete(m.Performance, state.Performance, &toDelete, "performance") @@ -191,13 +202,49 @@ func (m *backupJobModel) fillCommonFields( common.Compress = attribute.StringPtrFromValue(m.Compress) common.StartTime = attribute.StringPtrFromValue(m.StartTime) common.MaxFiles = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.MaxFiles)) - 
common.MailTo = attribute.StringPtrFromValue(m.MailTo) + // MailTo: convert types.List to comma-separated string + if !m.MailTo.IsNull() && !m.MailTo.IsUnknown() { + var emails []string + + d := m.MailTo.ElementsAs(ctx, &emails, false) + diags.Append(d...) + + if !d.HasError() && len(emails) > 0 { + emailStr := strings.Join(emails, ",") + common.MailTo = &emailStr + } + } + common.MailNotification = attribute.StringPtrFromValue(m.MailNotification) common.BwLimit = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.BwLimit)) common.IONice = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.IONice)) common.Pigz = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.Pigz)) common.Zstd = int64PtrToIntPtr(attribute.Int64PtrFromValue(m.Zstd)) - common.PruneBackups = attribute.StringPtrFromValue(m.PruneBackups) + // PruneBackups: convert types.Map to comma-separated key=value string + if !m.PruneBackups.IsNull() && !m.PruneBackups.IsUnknown() { + pruneMap := make(map[string]string) + + d := m.PruneBackups.ElementsAs(ctx, &pruneMap, false) + diags.Append(d...) 
+ + if !d.HasError() && len(pruneMap) > 0 { + keys := make([]string, 0, len(pruneMap)) + for k := range pruneMap { + keys = append(keys, k) + } + + sort.Strings(keys) + + parts := make([]string, 0, len(pruneMap)) + for _, k := range keys { + parts = append(parts, fmt.Sprintf("%s=%s", k, pruneMap[k])) + } + + pruneStr := strings.Join(parts, ",") + common.PruneBackups = &pruneStr + } + } + common.Remove = attribute.CustomBoolPtrFromValue(m.Remove) common.NotesTemplate = attribute.StringPtrFromValue(m.NotesTemplate) common.Protected = attribute.CustomBoolPtrFromValue(m.Protected) @@ -260,8 +307,8 @@ func (m *backupJobModel) fillCommonFields( if !d.HasError() { common.Performance = &backup.PerformanceConfig{ - MaxWorkers: int64PtrToIntPtr(perf.MaxWorkers.ValueInt64Pointer()), - PBSEntriesMax: int64PtrToIntPtr(perf.PBSEntriesMax.ValueInt64Pointer()), + MaxWorkers: int64PtrToCustomIntPtr(perf.MaxWorkers.ValueInt64Pointer()), + PBSEntriesMax: int64PtrToCustomIntPtr(perf.PBSEntriesMax.ValueInt64Pointer()), } } } @@ -289,7 +336,10 @@ func (m *backupJobModel) fromAPI( vmidValues[i] = types.StringValue(strings.TrimSpace(id)) } - m.VMIDs, _ = types.ListValue(types.StringType, vmidValues) + listVal, d := types.ListValue(types.StringType, vmidValues) + diags.Append(d...) 
+ + m.VMIDs = listVal } else { m.VMIDs = types.ListNull(types.StringType) } @@ -298,7 +348,24 @@ func (m *backupJobModel) fromAPI( m.Compress = types.StringPointerValue(data.Compress) m.StartTime = types.StringPointerValue(data.StartTime) m.MaxFiles = types.Int64PointerValue(intPtrToInt64Ptr(data.MaxFiles)) - m.MailTo = types.StringPointerValue(data.MailTo) + + // MailTo: convert comma-separated string to list + if data.MailTo != nil && *data.MailTo != "" { + emails := strings.Split(*data.MailTo, ",") + emailValues := make([]attr.Value, len(emails)) + + for i, e := range emails { + emailValues[i] = types.StringValue(strings.TrimSpace(e)) + } + + listVal, d := types.ListValue(types.StringType, emailValues) + diags.Append(d...) + + m.MailTo = listVal + } else { + m.MailTo = types.ListNull(types.StringType) + } + m.MailNotification = types.StringPointerValue(data.MailNotification) m.BwLimit = types.Int64PointerValue(intPtrToInt64Ptr(data.BwLimit)) m.IONice = types.Int64PointerValue(intPtrToInt64Ptr(data.IONice)) @@ -316,11 +383,28 @@ func (m *backupJobModel) fromAPI( m.StopWait = types.Int64PointerValue(intPtrToInt64Ptr(data.StopWait)) m.TmpDir = types.StringPointerValue(data.TmpDir) - // PruneBackups + // PruneBackups: convert comma-separated key=value string to map if data.PruneBackups != nil { - m.PruneBackups = types.StringPointerValue(data.PruneBackups.Pointer()) + s := data.PruneBackups.Pointer() + if s != nil && *s != "" { + mapValues := make(map[string]attr.Value) + + for part := range strings.SplitSeq(*s, ",") { + kv := strings.SplitN(strings.TrimSpace(part), "=", 2) + if len(kv) == 2 { + mapValues[kv[0]] = types.StringValue(kv[1]) + } + } + + mapVal, d := types.MapValue(types.StringType, mapValues) + diags.Append(d...) 
+ + m.PruneBackups = mapVal + } else { + m.PruneBackups = types.MapNull(types.StringType) + } } else { - m.PruneBackups = types.StringNull() + m.PruneBackups = types.MapNull(types.StringType) } // ExcludePath: convert CustomCommaSeparatedList to types.List @@ -356,8 +440,8 @@ func (m *backupJobModel) fromAPI( // Performance: convert to types.Object if data.Performance != nil { perfVal := performanceModel{ - MaxWorkers: types.Int64PointerValue(intPtrToInt64Ptr(data.Performance.MaxWorkers)), - PBSEntriesMax: types.Int64PointerValue(intPtrToInt64Ptr(data.Performance.PBSEntriesMax)), + MaxWorkers: types.Int64PointerValue(customIntPtrToInt64Ptr(data.Performance.MaxWorkers)), + PBSEntriesMax: types.Int64PointerValue(customIntPtrToInt64Ptr(data.Performance.PBSEntriesMax)), } obj, d := types.ObjectValueFrom(ctx, performanceAttrTypes(), perfVal) @@ -382,11 +466,11 @@ type backupJobDatasourceModel struct { All types.Bool `tfsdk:"all"` Mode types.String `tfsdk:"mode"` Compress types.String `tfsdk:"compress"` - MailTo types.String `tfsdk:"mailto"` + MailTo types.List `tfsdk:"mailto"` MailNotification types.String `tfsdk:"mailnotification"` NotesTemplate types.String `tfsdk:"notes_template"` Pool types.String `tfsdk:"pool"` - PruneBackups types.String `tfsdk:"prune_backups"` + PruneBackups types.Map `tfsdk:"prune_backups"` Protected types.Bool `tfsdk:"protected"` } @@ -399,7 +483,21 @@ func (m *backupJobDatasourceModel) fromAPI(data *backup.GetResponseData) { m.All = types.BoolPointerValue(data.All.PointerBool()) m.Mode = types.StringPointerValue(data.Mode) m.Compress = types.StringPointerValue(data.Compress) - m.MailTo = types.StringPointerValue(data.MailTo) + + // MailTo: convert comma-separated string to list + if data.MailTo != nil && *data.MailTo != "" { + emails := strings.Split(*data.MailTo, ",") + emailValues := make([]attr.Value, len(emails)) + + for i, e := range emails { + emailValues[i] = types.StringValue(strings.TrimSpace(e)) + } + + m.MailTo = 
types.ListValueMust(types.StringType, emailValues) + } else { + m.MailTo = types.ListNull(types.StringType) + } + m.MailNotification = types.StringPointerValue(data.MailNotification) m.NotesTemplate = types.StringPointerValue(data.NotesTemplate) m.Pool = types.StringPointerValue(data.Pool) @@ -414,14 +512,29 @@ func (m *backupJobDatasourceModel) fromAPI(data *backup.GetResponseData) { vmidValues[i] = types.StringValue(strings.TrimSpace(id)) } - m.VMIDs, _ = types.ListValue(types.StringType, vmidValues) + m.VMIDs = types.ListValueMust(types.StringType, vmidValues) } else { m.VMIDs = types.ListNull(types.StringType) } + // PruneBackups: convert comma-separated key=value string to map if data.PruneBackups != nil { - m.PruneBackups = types.StringPointerValue(data.PruneBackups.Pointer()) + s := data.PruneBackups.Pointer() + if s != nil && *s != "" { + mapValues := make(map[string]attr.Value) + + for part := range strings.SplitSeq(*s, ",") { + kv := strings.SplitN(strings.TrimSpace(part), "=", 2) + if len(kv) == 2 { + mapValues[kv[0]] = types.StringValue(kv[1]) + } + } + + m.PruneBackups = types.MapValueMust(types.StringType, mapValues) + } else { + m.PruneBackups = types.MapNull(types.StringType) + } } else { - m.PruneBackups = types.StringNull() + m.PruneBackups = types.MapNull(types.StringType) } } diff --git a/fwprovider/cluster/backup/resource.go b/fwprovider/cluster/backup/resource.go index 4d37273e2..003c0c5cc 100644 --- a/fwprovider/cluster/backup/resource.go +++ b/fwprovider/cluster/backup/resource.go @@ -12,9 +12,14 @@ import ( "fmt" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" 
"github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -26,9 +31,10 @@ import ( ) var ( - _ resource.Resource = &backupJobResource{} - _ resource.ResourceWithConfigure = &backupJobResource{} - _ resource.ResourceWithImportState = &backupJobResource{} + _ resource.Resource = &backupJobResource{} + _ resource.ResourceWithConfigure = &backupJobResource{} + _ resource.ResourceWithImportState = &backupJobResource{} + _ resource.ResourceWithConfigValidators = &backupJobResource{} ) type backupJobResource struct { @@ -70,6 +76,23 @@ func (r *backupJobResource) Configure( r.client = cfg.Client.Cluster().Backup() } +func (r *backupJobResource) ConfigValidators(_ context.Context) []resource.ConfigValidator { + return []resource.ConfigValidator{ + resourcevalidator.Conflicting( + path.MatchRoot("all"), + path.MatchRoot("vmid"), + ), + resourcevalidator.Conflicting( + path.MatchRoot("all"), + path.MatchRoot("pool"), + ), + resourcevalidator.Conflicting( + path.MatchRoot("vmid"), + path.MatchRoot("pool"), + ), + } +} + func (r *backupJobResource) Schema( _ context.Context, _ resource.SchemaRequest, @@ -97,6 +120,9 @@ func (r *backupJobResource) Schema( Description: "Whether the backup job is enabled.", Optional: true, Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, }, "node": schema.StringAttribute{ Description: "The cluster node 
name to limit the backup job to.", @@ -111,6 +137,9 @@ func (r *backupJobResource) Schema( Description: "Whether to back up all known guests on the node.", Optional: true, Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, }, "mode": schema.StringAttribute{ Description: "The backup mode (snapshot, suspend, or stop).", @@ -119,6 +148,9 @@ func (r *backupJobResource) Schema( Validators: []validator.String{ stringvalidator.OneOf("snapshot", "suspend", "stop"), }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, "compress": schema.StringAttribute{ Description: "The compression algorithm (0, gzip, lzo, or zstd).", @@ -127,6 +159,9 @@ func (r *backupJobResource) Schema( Validators: []validator.String{ stringvalidator.OneOf("0", "1", "gzip", "lzo", "zstd"), }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, "starttime": schema.StringAttribute{ Description: "The scheduled start time (HH:MM).", @@ -136,9 +171,10 @@ func (r *backupJobResource) Schema( Description: "Deprecated: use prune_backups instead. 
Maximum number of backup files per guest.", Optional: true, }, - "mailto": schema.StringAttribute{ - Description: "A comma-separated list of email addresses to send notifications to.", + "mailto": schema.ListAttribute{ + Description: "A list of email addresses to send notifications to.", Optional: true, + ElementType: types.StringType, }, "mailnotification": schema.StringAttribute{ Description: "Email notification setting (always or failure).", @@ -147,11 +183,17 @@ func (r *backupJobResource) Schema( Validators: []validator.String{ stringvalidator.OneOf("always", "failure"), }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, "bwlimit": schema.Int64Attribute{ Description: "I/O bandwidth limit in KiB/s.", Optional: true, Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, }, "ionice": schema.Int64Attribute{ Description: "I/O priority (0-8).", @@ -160,6 +202,9 @@ func (r *backupJobResource) Schema( Validators: []validator.Int64{ int64validator.Between(0, 8), }, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, }, "pigz": schema.Int64Attribute{ Description: "Number of pigz threads (0 disables, 1 uses single-threaded gzip).", @@ -169,16 +214,23 @@ func (r *backupJobResource) Schema( Description: "Number of zstd threads (0 uses half of available cores).", Optional: true, }, - "prune_backups": schema.StringAttribute{ - Description: "Retention options as a comma-separated list of key=value pairs " + - "(e.g. keep-last=3,keep-weekly=2).", - Optional: true, - Computed: true, + "prune_backups": schema.MapAttribute{ + Description: "Retention options as a map of keep policies " + + "(e.g. 
keep-last = \"3\", keep-weekly = \"2\").", + Optional: true, + Computed: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Map{ + mapplanmodifier.UseStateForUnknown(), + }, }, "remove": schema.BoolAttribute{ Description: "Whether to remove old backups if there are more than maxfiles.", Optional: true, Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, }, "notes_template": schema.StringAttribute{ Description: "Template for notes attached to the backup.", @@ -188,11 +240,17 @@ func (r *backupJobResource) Schema( Description: "Whether the backup should be marked as protected.", Optional: true, Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, }, "repeat_missed": schema.BoolAttribute{ Description: "Whether to repeat missed backup jobs as soon as possible.", Optional: true, Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, }, "script": schema.StringAttribute{ Description: "Path to a script to execute before/after the backup job.", @@ -202,6 +260,9 @@ func (r *backupJobResource) Schema( Description: "Whether to exclude common temporary files from the backup.", Optional: true, Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, }, "exclude_path": schema.ListAttribute{ Description: "A list of paths to exclude from the backup.", @@ -220,6 +281,9 @@ func (r *backupJobResource) Schema( Description: "Whether fleecing is enabled.", Optional: true, Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, }, "storage": schema.StringAttribute{ Description: "The storage identifier for fleecing.", diff --git a/fwprovider/cluster/backup/resource_test.go b/fwprovider/cluster/backup/resource_test.go index 3cade4f43..67437a6b5 100644 --- a/fwprovider/cluster/backup/resource_test.go +++ b/fwprovider/cluster/backup/resource_test.go 
@@ -98,13 +98,14 @@ func TestAccResourceBackupJob(t *testing.T) { all = true mode = "snapshot" compress = "zstd" - mailto = "test@example.com" + mailto = ["test@example.com"] }`), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "id", "acc-test-del"), resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "mode", "snapshot"), resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "compress", "zstd"), - resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "mailto", "test@example.com"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "mailto.#", "1"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "mailto.0", "test@example.com"), ), }, { @@ -118,9 +119,7 @@ func TestAccResourceBackupJob(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "id", "acc-test-del"), resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "all", "true"), - test.NoResourceAttributesSet("proxmox_backup_job.test_delete", []string{ - "mailto", - }), + resource.TestCheckResourceAttr("proxmox_backup_job.test_delete", "mailto.#", "0"), ), }, }}, @@ -174,14 +173,81 @@ func TestAccResourceBackupJob(t *testing.T) { schedule = "*-*-* 07:00" storage = "local" all = true - prune_backups = "keep-daily=7,keep-last=3" + prune_backups = { + keep-daily = "7" + keep-last = "3" + } }`), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("proxmox_backup_job.test_retention", "id", "acc-test-ret"), - resource.TestCheckResourceAttr("proxmox_backup_job.test_retention", "prune_backups", "keep-daily=7,keep-last=3"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_retention", "prune_backups.%", "2"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_retention", "prune_backups.keep-daily", "7"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_retention", 
"prune_backups.keep-last", "3"), ), }, }}, + {"backup with fleecing", []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_fleecing" { + id = "acc-test-flc" + schedule = "*-*-* 09:00" + storage = "local" + all = true + fleecing = { + enabled = true + storage = "local" + } + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_fleecing", "id", "acc-test-flc"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_fleecing", "fleecing.enabled", "true"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_fleecing", "fleecing.storage", "local"), + ), + }, + }}, + {"backup with performance settings", []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_perf" { + id = "acc-test-perf" + schedule = "*-*-* 10:00" + storage = "local" + all = true + performance = { + max_workers = 2 + } + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_perf", "id", "acc-test-perf"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_perf", "performance.max_workers", "2"), + ), + }, + }}, + {"backup with multiple mailto addresses", []resource.TestStep{ + { + Config: te.RenderConfig(` + resource "proxmox_backup_job" "test_mailto" { + id = "acc-test-mail" + schedule = "*-*-* 11:00" + storage = "local" + all = true + mailto = ["admin@example.com", "ops@example.com"] + }`), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("proxmox_backup_job.test_mailto", "id", "acc-test-mail"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_mailto", "mailto.#", "2"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_mailto", "mailto.0", "admin@example.com"), + resource.TestCheckResourceAttr("proxmox_backup_job.test_mailto", "mailto.1", "ops@example.com"), + ), + }, + { + ResourceName: "proxmox_backup_job.test_mailto", + ImportStateId: "acc-test-mail", + ImportState: true, + 
ImportStateVerify: true, + }, + }}, } for _, tt := range tests { @@ -195,9 +261,11 @@ func TestAccResourceBackupJob(t *testing.T) { } func TestAccDataSourceBackupJobs(t *testing.T) { + t.Parallel() + te := test.InitEnvironment(t) - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: te.AccProviders, Steps: []resource.TestStep{ { diff --git a/proxmox/cluster/backup/backup_types.go b/proxmox/cluster/backup/backup_types.go index ce3dbd477..76fec613d 100644 --- a/proxmox/cluster/backup/backup_types.go +++ b/proxmox/cluster/backup/backup_types.go @@ -215,8 +215,8 @@ func (f *FleecingConfig) EncodeValues(key string, v *url.Values) error { // PerformanceConfig contains the performance configuration for a backup job. type PerformanceConfig struct { - MaxWorkers *int `json:"max-workers,omitempty" url:"max-workers,omitempty"` - PBSEntriesMax *int `json:"pbs-entries-max,omitempty" url:"pbs-entries-max,omitempty"` + MaxWorkers *types.CustomInt `json:"max-workers,omitempty" url:"max-workers,omitempty"` + PBSEntriesMax *types.CustomInt `json:"pbs-entries-max,omitempty" url:"pbs-entries-max,omitempty"` } // EncodeValues encodes the PerformanceConfig into URL values as a comma-separated key=value string. 
@@ -224,11 +224,11 @@ func (p *PerformanceConfig) EncodeValues(key string, v *url.Values) error { var parts []string if p.MaxWorkers != nil { - parts = append(parts, fmt.Sprintf("max-workers=%d", *p.MaxWorkers)) + parts = append(parts, fmt.Sprintf("max-workers=%d", int(*p.MaxWorkers))) } if p.PBSEntriesMax != nil { - parts = append(parts, fmt.Sprintf("pbs-entries-max=%d", *p.PBSEntriesMax)) + parts = append(parts, fmt.Sprintf("pbs-entries-max=%d", int(*p.PBSEntriesMax))) } if len(parts) > 0 { From 86b042ade5df129fa365a1eddee6b7b5af5ad108 Mon Sep 17 00:00:00 2001 From: Pavel Boldyrev Date: Sat, 21 Mar 2026 14:25:19 -0400 Subject: [PATCH 3/3] moar comments Signed-off-by: Pavel Boldyrev --- docs/data-sources/backup_jobs.md | 2 +- docs/resources/backup_job.md | 4 ++-- fwprovider/cluster/backup/datasource.go | 3 ++- fwprovider/cluster/backup/resource.go | 4 ++-- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/data-sources/backup_jobs.md b/docs/data-sources/backup_jobs.md index 4212408d3..059a69333 100644 --- a/docs/data-sources/backup_jobs.md +++ b/docs/data-sources/backup_jobs.md @@ -41,7 +41,7 @@ Read-Only: - `notes_template` (String) Template for backup notes. - `pool` (String) Pool whose members are backed up. - `protected` (Boolean) Indicates whether backups created by this job are protected from pruning. -- `prune_backups` (Map of String) Retention options as a map of keep policies. +- `prune_backups` (Map of String) Retention options as a map of keep policies (e.g. keep-last = "3", keep-weekly = "2"). - `schedule` (String) Backup schedule in systemd calendar format. - `storage` (String) Target storage for the backup. - `vmid` (List of String) List of VM/CT IDs included in the backup job. 
diff --git a/docs/resources/backup_job.md b/docs/resources/backup_job.md index 0fbd6f29a..420b75ac6 100644 --- a/docs/resources/backup_job.md +++ b/docs/resources/backup_job.md @@ -30,14 +30,14 @@ resource "proxmox_backup_job" "daily_backup" { ### Required - `id` (String) The identifier of the backup job. -- `schedule` (String) Backup schedule in cron format or systemd calendar event. +- `schedule` (String) Backup schedule in systemd calendar event format. - `storage` (String) The storage identifier for the backup. ### Optional - `all` (Boolean) Whether to back up all known guests on the node. - `bwlimit` (Number) I/O bandwidth limit in KiB/s. -- `compress` (String) The compression algorithm (0, gzip, lzo, or zstd). +- `compress` (String) The compression algorithm (0, 1, gzip, lzo, or zstd). - `enabled` (Boolean) Whether the backup job is enabled. - `exclude_path` (List of String) A list of paths to exclude from the backup. - `fleecing` (Attributes) Fleecing configuration for the backup job. (see [below for nested schema](#nestedatt--fleecing)) diff --git a/fwprovider/cluster/backup/datasource.go b/fwprovider/cluster/backup/datasource.go index 6a10ed03f..89c1717f8 100644 --- a/fwprovider/cluster/backup/datasource.go +++ b/fwprovider/cluster/backup/datasource.go @@ -141,7 +141,8 @@ func (d *backupJobsDataSource) Schema( Computed: true, }, "prune_backups": schema.MapAttribute{ - Description: "Retention options as a map of keep policies.", + Description: "Retention options as a map of keep policies " + + "(e.g. 
keep-last = \"3\", keep-weekly = \"2\").", Computed: true, ElementType: types.StringType, }, diff --git a/fwprovider/cluster/backup/resource.go b/fwprovider/cluster/backup/resource.go index 003c0c5cc..75f55b3de 100644 --- a/fwprovider/cluster/backup/resource.go +++ b/fwprovider/cluster/backup/resource.go @@ -109,7 +109,7 @@ func (r *backupJobResource) Schema( }, }, "schedule": schema.StringAttribute{ - Description: "Backup schedule in cron format or systemd calendar event.", + Description: "Backup schedule in systemd calendar event format.", Required: true, }, "storage": schema.StringAttribute{ @@ -153,7 +153,7 @@ func (r *backupJobResource) Schema( }, }, "compress": schema.StringAttribute{ - Description: "The compression algorithm (0, gzip, lzo, or zstd).", + Description: "The compression algorithm (0, 1, gzip, lzo, or zstd).", Optional: true, Computed: true, Validators: []validator.String{