 	"context"
 	"encoding/json"
 	"fmt"
+
 	"github.com/hashicorp/go-cty/cty"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
@@ -336,6 +337,9 @@ func ResourceServerCreate(ctx context.Context, d *schema.ResourceData, m interfa
 	if file != "" {
 		todecode, _ := file.(string)
 		err = json.Unmarshal([]byte(todecode), &partitioningSchema)
+		if err != nil {
+			return diag.FromErr(err)
+		}
 	}
 	req.Install = &baremetal.CreateServerRequestInstall{
 		OsID: zonal.ExpandID(d.Get("os")).ID,
@@ -487,63 +491,6 @@ func ResourceServerRead(ctx context.Context, d *schema.ResourceData, m interface
 	return nil
 }
 
-//func schemaToStringList(schema *baremetal.Schema) string {
-//	var result string
-//
-//	if schema.Disks != nil {
-//		for _, disk := range schema.Disks {
-//			if disk != nil {
-//				result += "Disk: " + disk.Device
-//				if disk.Partitions != nil {
-//					for _, partition := range disk.Partitions {
-//						if partition != nil {
-//							result += fmt.Sprintf(" Partition: %s Number: %d Size: %s", partition.Label, partition.Number, partition.Size.String())
-//						}
-//					}
-//				}
-//			}
-//		}
-//	}
-//
-//	if schema.Raids != nil {
-//		for _, raid := range schema.Raids {
-//			if raid != nil {
-//				result += fmt.Sprintf("RAID: %s Level: %s", raid.Name, raid.Level)
-//				for _, device := range raid.Devices {
-//					result += " Device: " + device
-//				}
-//			}
-//		}
-//	}
-//
-//	if schema.Filesystems != nil {
-//		for _, fs := range schema.Filesystems {
-//			if fs != nil {
-//				result += fmt.Sprintf("Filesystem: %s Format: %s Mountpoint: %s", fs.Device, fs.Format, fs.Mountpoint)
-//			}
-//		}
-//	}
-//
-//	if schema.Zfs != nil {
-//		for _, pool := range schema.Zfs.Pools {
-//			if pool != nil {
-//				result += fmt.Sprintf("ZFS Pool: %s Type: %s", pool.Name, pool.Type)
-//				for _, device := range pool.Devices {
-//					result += " Device: " + device
-//				}
-//				for _, option := range pool.Options {
-//					result += " Option: " + option
-//				}
-//				for _, fsOption := range pool.FilesystemOptions {
-//					result += " Filesystem Option: " + fsOption
-//				}
-//			}
-//		}
-//	}
-//
-//	return result
-//}
-
 //gocyclo:ignore
 func ResourceServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	api, zonedID, err := NewAPIWithZoneAndID(m, d.Id())