Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(instance): Fix block volume detach when no additional volumes remaining #789

Merged
merged 10 commits into from
Jun 3, 2022
6 changes: 5 additions & 1 deletion scaleway/resource_instance_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -677,12 +677,16 @@ func resourceScalewayInstanceServerUpdate(ctx context.Context, d *schema.Resourc

volumes := map[string]*instance.VolumeServerTemplate{}

if raw, ok := d.GetOk("additional_volume_ids"); d.HasChange("additional_volume_ids") && ok {
if raw, hasVolumes := d.GetOk("additional_volume_ids"); d.HasChange("additional_volume_ids") {
volumes["0"] = &instance.VolumeServerTemplate{
ID: expandZonedID(d.Get("root_volume.0.volume_id")).ID,
Name: newRandomName("vol"), // name is ignored by the API, any name will work here
}

if !hasVolumes {
raw = []interface{}{} // Set an empty list if no volumes exist
}

for i, volumeID := range raw.([]interface{}) {
volumeHasChange := d.HasChange("additional_volume_ids." + strconv.Itoa(i))
// local volumes can only be added when the instance is stopped
Expand Down
67 changes: 67 additions & 0 deletions scaleway/resource_instance_server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -543,6 +543,73 @@ func TestAccScalewayInstanceServer_AdditionalVolumes(t *testing.T) {
})
}

// TestAccScalewayInstanceServer_AdditionalVolumesDetach checks that a server
// can detach its last remaining additional block volume (by setting
// additional_volume_ids to an empty list) and that the detached volume
// itself survives the update.
func TestAccScalewayInstanceServer_AdditionalVolumesDetach(t *testing.T) {
	tt := NewTestTools(t)
	defer tt.Cleanup()

	// First step: a server created with one attached block volume.
	serverWithVolume := `
		variable "zone" {
			type    = string
			default = "fr-par-1"
		}

		resource "scaleway_instance_volume" "main" {
			type       = "b_ssd"
			name       = "foobar"
			size_in_gb = 1
		}

		resource "scaleway_instance_server" "main" {
			type  = "DEV1-S"
			image = "ubuntu_focal"
			name  = "foobar"

			enable_dynamic_ip = true

			additional_volume_ids = [scaleway_instance_volume.main.id]
		}
	`

	// Second step: same resources, but the volume list is emptied so the
	// volume must be detached from the server without being destroyed.
	serverWithoutVolume := `
		variable "zone" {
			type    = string
			default = "fr-par-1"
		}

		resource "scaleway_instance_volume" "main" {
			type       = "b_ssd"
			name       = "foobar"
			size_in_gb = 1
		}

		resource "scaleway_instance_server" "main" {
			type  = "DEV1-S"
			image = "ubuntu_focal"
			name  = "foobar"

			enable_dynamic_ip = true

			additional_volume_ids = []
		}
	`

	resource.Test(t, resource.TestCase{
		PreCheck:          func() { testAccPreCheck(t) },
		ProviderFactories: tt.ProviderFactories,
		CheckDestroy: resource.ComposeTestCheckFunc(
			testAccCheckScalewayInstanceVolumeDestroy(tt),
			testAccCheckScalewayInstanceServerDestroy(tt),
		),
		Steps: []resource.TestStep{
			{
				Config: serverWithVolume,
			},
			{
				Config: serverWithoutVolume,
				Check: resource.ComposeTestCheckFunc(
					// The volume must still exist after being detached…
					testAccCheckScalewayInstanceVolumeExists(tt, "scaleway_instance_volume.main"),
					// …and the server must report no additional volumes.
					resource.TestCheckResourceAttr("scaleway_instance_server.main", "additional_volume_ids.#", "0"),
				),
			},
		},
	})
}

func TestAccScalewayInstanceServer_WithPlacementGroup(t *testing.T) {
tt := NewTestTools(t)
defer tt.Cleanup()
Expand Down
12 changes: 7 additions & 5 deletions scaleway/resource_instance_volume.go
Original file line number Diff line number Diff line change
Expand Up @@ -238,18 +238,20 @@ func resourceScalewayInstanceVolumeDelete(ctx context.Context, d *schema.Resourc
}

err = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
volumeResp, err := instanceAPI.GetVolume(&instance.GetVolumeRequest{
Zone: zone,
VolumeID: id,
})
volume, err := instanceAPI.WaitForVolume(&instance.WaitForVolumeRequest{
Zone: zone,
VolumeID: id,
RetryInterval: DefaultWaitRetryInterval,
Timeout: scw.TimeDurationPtr(d.Timeout(schema.TimeoutCreate)),
}, scw.WithContext(ctx))
if err != nil {
if is404Error(err) {
return nil
}
return resource.NonRetryableError(err)
}

if volumeResp.Volume.Server != nil {
if volume.Server != nil {
return resource.RetryableError(fmt.Errorf("volume is still attached to a server"))
}

Expand Down
Loading