Skip to content
Merged
Show file tree
Hide file tree
Changes from 14 commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
9ad79ea
feat(core): add TaskResult type for task warnings
bpg-dev Apr 3, 2026
0f29930
feat(core): WaitForTask returns *TaskResult instead of error
bpg-dev Apr 3, 2026
238e189
refactor(core): update all WaitForTask callers to use TaskResult
bpg-dev Apr 3, 2026
2c8ab07
refactor(vm): update StartVM callers to use TaskResult
bpg-dev Apr 3, 2026
7e8cc89
feat(core): propagate task warnings through DoTask callers
bpg-dev Apr 3, 2026
ba75587
fix(core): review fixes for TaskResult implementation
bpg-dev Apr 3, 2026
a580e11
refactor(core): address review comments
bpg-dev Apr 3, 2026
6899a3f
feat(vm): propagate task warnings from all VM operations
bpg-dev Apr 3, 2026
356521e
cleanup
bpg-dev Apr 3, 2026
99e9629
simplification
bpg-dev Apr 3, 2026
d15dd1c
ignore warnings now by default
bpg-dev Apr 4, 2026
c53cb57
address gemini feedback
bpg-dev Apr 4, 2026
22c05d3
fix warning handling in stop / shutdown
bpg-dev Apr 4, 2026
8c6a21b
moar cleanups
bpg-dev Apr 4, 2026
236c1ee
diags handling improvements
bpg-dev Apr 4, 2026
e9bea6b
Merge branch 'main' into feat/2597-include-proxmox-task-results-durin…
bpg-dev Apr 4, 2026
169e22e
review fixes
bpg-dev Apr 4, 2026
e041412
fix propagating some vm warnings
bpg-dev Apr 4, 2026
564d2c7
relocate common package
bpg-dev Apr 4, 2026
50dffa4
fix result re-wrap
bpg-dev Apr 4, 2026
84ec285
fix doc ````
bpg-dev Apr 4, 2026
f4f5364
more polishing and fixes
bpg-dev Apr 4, 2026
d1258f7
few more tests
bpg-dev Apr 4, 2026
7bb6a63
hope this is the last one
bpg-dev Apr 4, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
88 changes: 44 additions & 44 deletions fwprovider/nodes/clonedvm/resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ import (
"github.com/bpg/terraform-provider-proxmox/proxmox"
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
"github.com/bpg/terraform-provider-proxmox/proxmox/cluster"
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/tasks"
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
proxmoxtypes "github.com/bpg/terraform-provider-proxmox/proxmox/types"
)
Expand Down Expand Up @@ -160,9 +161,8 @@ func (r *Resource) Create(ctx context.Context, req resource.CreateRequest, resp

retries := int(plan.Clone.Retries.ValueInt64())

err := sourceVM.CloneVM(ctx, retries, cloneBody)
if err != nil {
resp.Diagnostics.AddError("Failed to clone VM", err.Error())
cloneResult := sourceVM.CloneVM(ctx, retries, cloneBody)
if cloneResult.AddDiags(&resp.Diagnostics, "VM clone") {
return
}

Expand Down Expand Up @@ -193,14 +193,12 @@ func (r *Resource) Create(ctx context.Context, req resource.CreateRequest, resp
if plan.Started.ValueBool() {
tflog.Debug(ctx, "Starting VM after clone")

_, err = vmAPI.StartVM(ctx, int(timeout.Seconds()))
if err != nil {
resp.Diagnostics.AddError("Failed to start VM", err.Error())
startResult := vmAPI.StartVM(ctx, int(timeout.Seconds()))
if startResult.AddDiags(&resp.Diagnostics, "VM start") {
return
}

err = vmAPI.WaitForVMStatus(ctx, "running")
if err != nil {
if err := vmAPI.WaitForVMStatus(ctx, "running"); err != nil {
resp.Diagnostics.AddError("Failed waiting for VM to start", err.Error())
return
}
Expand Down Expand Up @@ -303,26 +301,22 @@ func (r *Resource) Update(ctx context.Context, req resource.UpdateRequest, resp
if plan.Started.ValueBool() {
tflog.Debug(ctx, "Starting VM")

_, err = vmAPI.StartVM(ctx, int(timeout.Seconds()))
if err != nil {
resp.Diagnostics.AddError("Failed to start VM", err.Error())
startResult := vmAPI.StartVM(ctx, int(timeout.Seconds()))
if startResult.AddDiags(&resp.Diagnostics, "VM start") {
return
}

err = vmAPI.WaitForVMStatus(ctx, "running")
if err != nil {
if err := vmAPI.WaitForVMStatus(ctx, "running"); err != nil {
resp.Diagnostics.AddError("Failed waiting for VM to start", err.Error())
return
}
} else {
if plan.StopOnDestroy.ValueBool() {
if err = vmStop(ctx, vmAPI); err != nil {
resp.Diagnostics.AddError("Failed to stop VM", err.Error())
if vmStop(ctx, vmAPI).AddDiags(&resp.Diagnostics, "VM stop") {
return
}
} else {
if err = vmShutdown(ctx, vmAPI); err != nil {
resp.Diagnostics.AddError("Failed to shut down VM", err.Error())
if vmShutdown(ctx, vmAPI).AddDiags(&resp.Diagnostics, "VM shutdown") {
return
}
}
Expand Down Expand Up @@ -377,20 +371,17 @@ func (r *Resource) Delete(ctx context.Context, req resource.DeleteRequest, resp
}

if status != nil && status.Status != "stopped" {
// Stop/shutdown failures during delete are non-fatal — reported as warnings.
if state.StopOnDestroy.ValueBool() {
if e := vmStop(ctx, vmAPI); e != nil {
resp.Diagnostics.AddWarning("Failed to stop VM", e.Error())
}
vmStop(ctx, vmAPI).AddDiagsAsWarnings(&resp.Diagnostics, "VM stop/shutdown")
} else {
if e := vmShutdown(ctx, vmAPI); e != nil {
resp.Diagnostics.AddWarning("Failed to shut down VM", e.Error())
}
vmShutdown(ctx, vmAPI).AddDiagsAsWarnings(&resp.Diagnostics, "VM stop/shutdown")
}
}

err = vmAPI.DeleteVM(ctx, state.PurgeOnDestroy.ValueBool(), state.DeleteUnreferencedDisksOnDestroy.ValueBool())
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
resp.Diagnostics.AddError("Failed to delete VM", err.Error())
deleteResult := vmAPI.DeleteVM(ctx, state.PurgeOnDestroy.ValueBool(), state.DeleteUnreferencedDisksOnDestroy.ValueBool())
if deleteResult.Err() != nil && !errors.Is(deleteResult.Err(), api.ErrResourceDoesNotExist) {
resp.Diagnostics.AddError("Failed to delete VM", deleteResult.Err().Error())
}

if resp.Diagnostics.HasError() {
Expand Down Expand Up @@ -583,8 +574,7 @@ func applyManaged(ctx context.Context, vmAPI *vms.Client, plan Model, currentCon
}

for _, resize := range diskResizes {
if err := vmAPI.ResizeVMDisk(ctx, resize); err != nil {
diags.AddError("Failed to resize VM disk", err.Error())
if vmAPI.ResizeVMDisk(ctx, resize).AddDiags(diags, "VM disk resize") {
return
}
}
Expand Down Expand Up @@ -1016,7 +1006,7 @@ func isValidDiskSlot(slot string) bool {
}

// Shutdown the VM, then wait for it to actually shut down.
func vmShutdown(ctx context.Context, vmAPI *vms.Client) error {
func vmShutdown(ctx context.Context, vmAPI *vms.Client) tasks.TaskResult {
tflog.Debug(ctx, "Shutting down VM")

shutdownTimeoutSec := int(defaultShutdownTimeout.Seconds())
Expand All @@ -1025,35 +1015,45 @@ func vmShutdown(ctx context.Context, vmAPI *vms.Client) error {
shutdownTimeoutSec = int(time.Until(dl).Seconds())
}

err := vmAPI.ShutdownVM(ctx, &vms.ShutdownRequestBody{
result := vmAPI.ShutdownVM(ctx, &vms.ShutdownRequestBody{
ForceStop: proxmoxtypes.CustomBool(true).Pointer(),
Timeout: &shutdownTimeoutSec,
})
if err != nil {
return fmt.Errorf("failed to initiate shut down of VM: %w", err)
if result.Err() != nil {
return tasks.TaskFailedWithWarnings(
fmt.Errorf("failed to initiate shut down of VM: %w", result.Err()),
result.Warnings(),
)
}

err = vmAPI.WaitForVMStatus(ctx, "stopped")
if err != nil {
return fmt.Errorf("failed to wait for VM to shut down: %w", err)
if err := vmAPI.WaitForVMStatus(ctx, "stopped"); err != nil {
return tasks.TaskFailedWithWarnings(
fmt.Errorf("failed to wait for VM to shut down: %w", err),
result.Warnings(),
)
}

return nil
return result
}

// Forcefully stop the VM, then wait for it to actually stop.
func vmStop(ctx context.Context, vmAPI *vms.Client) error {
func vmStop(ctx context.Context, vmAPI *vms.Client) tasks.TaskResult {
tflog.Debug(ctx, "Stopping VM")

err := vmAPI.StopVM(ctx)
if err != nil {
return fmt.Errorf("failed to initiate stop of VM: %w", err)
result := vmAPI.StopVM(ctx)
if result.Err() != nil {
return tasks.TaskFailedWithWarnings(
fmt.Errorf("failed to initiate stop of VM: %w", result.Err()),
result.Warnings(),
)
}

err = vmAPI.WaitForVMStatus(ctx, "stopped")
if err != nil {
return fmt.Errorf("failed to wait for VM to stop: %w", err)
if err := vmAPI.WaitForVMStatus(ctx, "stopped"); err != nil {
return tasks.TaskFailedWithWarnings(
fmt.Errorf("failed to wait for VM to stop: %w", err),
result.Warnings(),
)
}

return nil
return result
}
4 changes: 2 additions & 2 deletions fwprovider/nodes/resource_acme_certificate.go
Original file line number Diff line number Diff line change
Expand Up @@ -288,7 +288,7 @@ func (r *acmeCertificateResource) Create(

// Wait for the task to complete
if taskID != nil && *taskID != "" {
err = nodeClient.Tasks().WaitForTask(ctx, *taskID)
err = nodeClient.Tasks().WaitForTask(ctx, *taskID).Err()
if err != nil {
resp.Diagnostics.AddError(
"Certificate order task failed",
Expand Down Expand Up @@ -417,7 +417,7 @@ func (r *acmeCertificateResource) Update(

// Wait for the task to complete
if taskID != nil && *taskID != "" {
err = nodeClient.Tasks().WaitForTask(ctx, *taskID)
err = nodeClient.Tasks().WaitForTask(ctx, *taskID).Err()
if err != nil {
resp.Diagnostics.AddError(
"Certificate renewal task failed",
Expand Down
26 changes: 15 additions & 11 deletions fwprovider/nodes/vm/concurrency_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,15 +45,15 @@ func TestBatchCreate(t *testing.T) {
sourceID, err := gen.NextID(ctx)
require.NoError(t, err)

err = te.NodeClient().VM(0).CreateVM(ctx, &vms.CreateRequestBody{VMID: sourceID})
createResult := te.NodeClient().VM(0).CreateVM(ctx, &vms.CreateRequestBody{VMID: sourceID})

require.NoError(t, err, "failed to create VM %d", sourceID)
require.NoError(t, createResult.Err(), "failed to create VM %d", sourceID)

ids := make([]int, numVMs)

t.Cleanup(func() {
if err := te.NodeClient().VM(sourceID).DeleteVM(ctx, true, true); err != nil {
t.Logf("cleanup warning: failed to delete source VM %d: %v", sourceID, err)
if result := te.NodeClient().VM(sourceID).DeleteVM(ctx, true, true); result.Err() != nil {
t.Logf("cleanup warning: failed to delete source VM %d: %v", sourceID, result.Err())
}

var wg sync.WaitGroup
Expand All @@ -64,8 +64,8 @@ func TestBatchCreate(t *testing.T) {
defer wg.Done()

if id > 0 {
if err := te.NodeClient().VM(id).DeleteVM(ctx, true, true); err != nil {
t.Logf("cleanup warning: failed to delete VM %d: %v", id, err)
if result := te.NodeClient().VM(id).DeleteVM(ctx, true, true); result.Err() != nil {
t.Logf("cleanup warning: failed to delete VM %d: %v", id, result.Err())
}
}
}()
Expand All @@ -76,21 +76,25 @@ func TestBatchCreate(t *testing.T) {

var wg sync.WaitGroup

errs := make([]error, numVMs)

for i := range numVMs {
wg.Add(1)

go func() {
defer wg.Done()

id := 999900 + i
if err == nil {
err = te.NodeClient().VM(sourceID).CloneVM(ctx, 5, &vms.CloneRequestBody{VMIDNew: id})
ids[i] = id
}
cloneResult := te.NodeClient().VM(sourceID).CloneVM(ctx, 5, &vms.CloneRequestBody{VMIDNew: id})

assert.NoError(t, err)
errs[i] = cloneResult.Err()
ids[i] = id
}()
}

wg.Wait()

for i, cloneErr := range errs {
assert.NoError(t, cloneErr, "clone VM %d failed", ids[i])
}
}
Loading
Loading