gcp.compute.Autoscaler

Google Cloud Classic v7.29.0 published on Wednesday, Jun 26, 2024 by Pulumi

    Represents an Autoscaler resource.

    Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define.

    To get more information about Autoscaler, see the Compute Engine API documentation and the how-to guides on autoscaling groups of instances.

    Example Usage

    Autoscaler Single Instance

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const debian9 = gcp.compute.getImage({
        family: "debian-11",
        project: "debian-cloud",
    });
    const defaultInstanceTemplate = new gcp.compute.InstanceTemplate("default", {
        name: "my-instance-template",
        machineType: "e2-medium",
        canIpForward: false,
        tags: [
            "foo",
            "bar",
        ],
        disks: [{
            sourceImage: debian9.then(debian9 => debian9.id),
        }],
        networkInterfaces: [{
            network: "default",
        }],
        metadata: {
            foo: "bar",
        },
        serviceAccount: {
            scopes: [
                "userinfo-email",
                "compute-ro",
                "storage-ro",
            ],
        },
    });
    const defaultTargetPool = new gcp.compute.TargetPool("default", {name: "my-target-pool"});
    const defaultInstanceGroupManager = new gcp.compute.InstanceGroupManager("default", {
        name: "my-igm",
        zone: "us-central1-f",
        versions: [{
            instanceTemplate: defaultInstanceTemplate.id,
            name: "primary",
        }],
        targetPools: [defaultTargetPool.id],
        baseInstanceName: "autoscaler-sample",
    });
    const _default = new gcp.compute.Autoscaler("default", {
        name: "my-autoscaler",
        zone: "us-central1-f",
        target: defaultInstanceGroupManager.id,
        autoscalingPolicy: {
            maxReplicas: 5,
            minReplicas: 1,
            cooldownPeriod: 60,
            metrics: [{
                name: "pubsub.googleapis.com/subscription/num_undelivered_messages",
                filter: "resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription",
                singleInstanceAssignment: 65535,
            }],
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    debian9 = gcp.compute.get_image(family="debian-11",
        project="debian-cloud")
    default_instance_template = gcp.compute.InstanceTemplate("default",
        name="my-instance-template",
        machine_type="e2-medium",
        can_ip_forward=False,
        tags=[
            "foo",
            "bar",
        ],
        disks=[gcp.compute.InstanceTemplateDiskArgs(
            source_image=debian9.id,
        )],
        network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
            network="default",
        )],
        metadata={
            "foo": "bar",
        },
        service_account=gcp.compute.InstanceTemplateServiceAccountArgs(
            scopes=[
                "userinfo-email",
                "compute-ro",
                "storage-ro",
            ],
        ))
    default_target_pool = gcp.compute.TargetPool("default", name="my-target-pool")
    default_instance_group_manager = gcp.compute.InstanceGroupManager("default",
        name="my-igm",
        zone="us-central1-f",
        versions=[gcp.compute.InstanceGroupManagerVersionArgs(
            instance_template=default_instance_template.id,
            name="primary",
        )],
        target_pools=[default_target_pool.id],
        base_instance_name="autoscaler-sample")
    default = gcp.compute.Autoscaler("default",
        name="my-autoscaler",
        zone="us-central1-f",
        target=default_instance_group_manager.id,
        autoscaling_policy=gcp.compute.AutoscalerAutoscalingPolicyArgs(
            max_replicas=5,
            min_replicas=1,
            cooldown_period=60,
            metrics=[gcp.compute.AutoscalerAutoscalingPolicyMetricArgs(
                name="pubsub.googleapis.com/subscription/num_undelivered_messages",
                filter="resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription",
                single_instance_assignment=65535,
            )],
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/compute"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		debian9, err := compute.LookupImage(ctx, &compute.LookupImageArgs{
    			Family:  pulumi.StringRef("debian-11"),
    			Project: pulumi.StringRef("debian-cloud"),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		defaultInstanceTemplate, err := compute.NewInstanceTemplate(ctx, "default", &compute.InstanceTemplateArgs{
    			Name:         pulumi.String("my-instance-template"),
    			MachineType:  pulumi.String("e2-medium"),
    			CanIpForward: pulumi.Bool(false),
    			Tags: pulumi.StringArray{
    				pulumi.String("foo"),
    				pulumi.String("bar"),
    			},
    			Disks: compute.InstanceTemplateDiskArray{
    				&compute.InstanceTemplateDiskArgs{
    					SourceImage: pulumi.String(debian9.Id),
    				},
    			},
    			NetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{
    				&compute.InstanceTemplateNetworkInterfaceArgs{
    					Network: pulumi.String("default"),
    				},
    			},
    			Metadata: pulumi.Map{
    				"foo": pulumi.Any("bar"),
    			},
    			ServiceAccount: &compute.InstanceTemplateServiceAccountArgs{
    				Scopes: pulumi.StringArray{
    					pulumi.String("userinfo-email"),
    					pulumi.String("compute-ro"),
    					pulumi.String("storage-ro"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		defaultTargetPool, err := compute.NewTargetPool(ctx, "default", &compute.TargetPoolArgs{
    			Name: pulumi.String("my-target-pool"),
    		})
    		if err != nil {
    			return err
    		}
    		defaultInstanceGroupManager, err := compute.NewInstanceGroupManager(ctx, "default", &compute.InstanceGroupManagerArgs{
    			Name: pulumi.String("my-igm"),
    			Zone: pulumi.String("us-central1-f"),
    			Versions: compute.InstanceGroupManagerVersionArray{
    				&compute.InstanceGroupManagerVersionArgs{
    					InstanceTemplate: defaultInstanceTemplate.ID(),
    					Name:             pulumi.String("primary"),
    				},
    			},
    			TargetPools: pulumi.StringArray{
    				defaultTargetPool.ID(),
    			},
    			BaseInstanceName: pulumi.String("autoscaler-sample"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = compute.NewAutoscaler(ctx, "default", &compute.AutoscalerArgs{
    			Name:   pulumi.String("my-autoscaler"),
    			Zone:   pulumi.String("us-central1-f"),
    			Target: defaultInstanceGroupManager.ID(),
    			AutoscalingPolicy: &compute.AutoscalerAutoscalingPolicyArgs{
    				MaxReplicas:    pulumi.Int(5),
    				MinReplicas:    pulumi.Int(1),
    				CooldownPeriod: pulumi.Int(60),
    				Metrics: compute.AutoscalerAutoscalingPolicyMetricArray{
    					&compute.AutoscalerAutoscalingPolicyMetricArgs{
    						Name:                     pulumi.String("pubsub.googleapis.com/subscription/num_undelivered_messages"),
    						Filter:                   pulumi.String("resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription"),
    						SingleInstanceAssignment: pulumi.Float64(65535),
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var debian9 = Gcp.Compute.GetImage.Invoke(new()
        {
            Family = "debian-11",
            Project = "debian-cloud",
        });
    
        var defaultInstanceTemplate = new Gcp.Compute.InstanceTemplate("default", new()
        {
            Name = "my-instance-template",
            MachineType = "e2-medium",
            CanIpForward = false,
            Tags = new[]
            {
                "foo",
                "bar",
            },
            Disks = new[]
            {
                new Gcp.Compute.Inputs.InstanceTemplateDiskArgs
                {
                    SourceImage = debian9.Apply(getImageResult => getImageResult.Id),
                },
            },
            NetworkInterfaces = new[]
            {
                new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs
                {
                    Network = "default",
                },
            },
            Metadata = 
            {
                { "foo", "bar" },
            },
            ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs
            {
                Scopes = new[]
                {
                    "userinfo-email",
                    "compute-ro",
                    "storage-ro",
                },
            },
        });
    
        var defaultTargetPool = new Gcp.Compute.TargetPool("default", new()
        {
            Name = "my-target-pool",
        });
    
        var defaultInstanceGroupManager = new Gcp.Compute.InstanceGroupManager("default", new()
        {
            Name = "my-igm",
            Zone = "us-central1-f",
            Versions = new[]
            {
                new Gcp.Compute.Inputs.InstanceGroupManagerVersionArgs
                {
                    InstanceTemplate = defaultInstanceTemplate.Id,
                    Name = "primary",
                },
            },
            TargetPools = new[]
            {
                defaultTargetPool.Id,
            },
            BaseInstanceName = "autoscaler-sample",
        });
    
        var @default = new Gcp.Compute.Autoscaler("default", new()
        {
            Name = "my-autoscaler",
            Zone = "us-central1-f",
            Target = defaultInstanceGroupManager.Id,
            AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
            {
                MaxReplicas = 5,
                MinReplicas = 1,
                CooldownPeriod = 60,
                Metrics = new[]
                {
                    new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyMetricArgs
                    {
                        Name = "pubsub.googleapis.com/subscription/num_undelivered_messages",
                        Filter = "resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription",
                        SingleInstanceAssignment = 65535,
                    },
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.compute.ComputeFunctions;
    import com.pulumi.gcp.compute.inputs.GetImageArgs;
    import com.pulumi.gcp.compute.InstanceTemplate;
    import com.pulumi.gcp.compute.InstanceTemplateArgs;
    import com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;
    import com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;
    import com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;
    import com.pulumi.gcp.compute.TargetPool;
    import com.pulumi.gcp.compute.TargetPoolArgs;
    import com.pulumi.gcp.compute.InstanceGroupManager;
    import com.pulumi.gcp.compute.InstanceGroupManagerArgs;
    import com.pulumi.gcp.compute.inputs.InstanceGroupManagerVersionArgs;
    import com.pulumi.gcp.compute.Autoscaler;
    import com.pulumi.gcp.compute.AutoscalerArgs;
    import com.pulumi.gcp.compute.inputs.AutoscalerAutoscalingPolicyArgs;
    import com.pulumi.gcp.compute.inputs.AutoscalerAutoscalingPolicyMetricArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var debian9 = ComputeFunctions.getImage(GetImageArgs.builder()
                .family("debian-11")
                .project("debian-cloud")
                .build());
    
            var defaultInstanceTemplate = new InstanceTemplate("defaultInstanceTemplate", InstanceTemplateArgs.builder()
                .name("my-instance-template")
                .machineType("e2-medium")
                .canIpForward(false)
                .tags(            
                    "foo",
                    "bar")
                .disks(InstanceTemplateDiskArgs.builder()
                    .sourceImage(debian9.applyValue(getImageResult -> getImageResult.id()))
                    .build())
                .networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()
                    .network("default")
                    .build())
                .metadata(Map.of("foo", "bar"))
                .serviceAccount(InstanceTemplateServiceAccountArgs.builder()
                    .scopes(                
                        "userinfo-email",
                        "compute-ro",
                        "storage-ro")
                    .build())
                .build());
    
            var defaultTargetPool = new TargetPool("defaultTargetPool", TargetPoolArgs.builder()
                .name("my-target-pool")
                .build());
    
            var defaultInstanceGroupManager = new InstanceGroupManager("defaultInstanceGroupManager", InstanceGroupManagerArgs.builder()
                .name("my-igm")
                .zone("us-central1-f")
                .versions(InstanceGroupManagerVersionArgs.builder()
                    .instanceTemplate(defaultInstanceTemplate.id())
                    .name("primary")
                    .build())
                .targetPools(defaultTargetPool.id())
                .baseInstanceName("autoscaler-sample")
                .build());
    
            var default_ = new Autoscaler("default", AutoscalerArgs.builder()
                .name("my-autoscaler")
                .zone("us-central1-f")
                .target(defaultInstanceGroupManager.id())
                .autoscalingPolicy(AutoscalerAutoscalingPolicyArgs.builder()
                    .maxReplicas(5)
                    .minReplicas(1)
                    .cooldownPeriod(60)
                    .metrics(AutoscalerAutoscalingPolicyMetricArgs.builder()
                        .name("pubsub.googleapis.com/subscription/num_undelivered_messages")
                        .filter("resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription")
                        .singleInstanceAssignment(65535)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      default:
        type: gcp:compute:Autoscaler
        properties:
          name: my-autoscaler
          zone: us-central1-f
          target: ${defaultInstanceGroupManager.id}
          autoscalingPolicy:
            maxReplicas: 5
            minReplicas: 1
            cooldownPeriod: 60
            metrics:
              - name: pubsub.googleapis.com/subscription/num_undelivered_messages
                filter: resource.type = pubsub_subscription AND resource.label.subscription_id = our-subscription
                singleInstanceAssignment: 65535
      defaultInstanceTemplate:
        type: gcp:compute:InstanceTemplate
        name: default
        properties:
          name: my-instance-template
          machineType: e2-medium
          canIpForward: false
          tags:
            - foo
            - bar
          disks:
            - sourceImage: ${debian9.id}
          networkInterfaces:
            - network: default
          metadata:
            foo: bar
          serviceAccount:
            scopes:
              - userinfo-email
              - compute-ro
              - storage-ro
      defaultTargetPool:
        type: gcp:compute:TargetPool
        name: default
        properties:
          name: my-target-pool
      defaultInstanceGroupManager:
        type: gcp:compute:InstanceGroupManager
        name: default
        properties:
          name: my-igm
          zone: us-central1-f
          versions:
            - instanceTemplate: ${defaultInstanceTemplate.id}
              name: primary
          targetPools:
            - ${defaultTargetPool.id}
          baseInstanceName: autoscaler-sample
    variables:
      debian9:
        fn::invoke:
          Function: gcp:compute:getImage
          Arguments:
            family: debian-11
            project: debian-cloud
    

    Autoscaler Basic

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const debian9 = gcp.compute.getImage({
        family: "debian-11",
        project: "debian-cloud",
    });
    const foobarInstanceTemplate = new gcp.compute.InstanceTemplate("foobar", {
        name: "my-instance-template",
        machineType: "e2-medium",
        canIpForward: false,
        tags: [
            "foo",
            "bar",
        ],
        disks: [{
            sourceImage: debian9.then(debian9 => debian9.id),
        }],
        networkInterfaces: [{
            network: "default",
        }],
        metadata: {
            foo: "bar",
        },
        serviceAccount: {
            scopes: [
                "userinfo-email",
                "compute-ro",
                "storage-ro",
            ],
        },
    });
    const foobarTargetPool = new gcp.compute.TargetPool("foobar", {name: "my-target-pool"});
    const foobarInstanceGroupManager = new gcp.compute.InstanceGroupManager("foobar", {
        name: "my-igm",
        zone: "us-central1-f",
        versions: [{
            instanceTemplate: foobarInstanceTemplate.id,
            name: "primary",
        }],
        targetPools: [foobarTargetPool.id],
        baseInstanceName: "foobar",
    });
    const foobar = new gcp.compute.Autoscaler("foobar", {
        name: "my-autoscaler",
        zone: "us-central1-f",
        target: foobarInstanceGroupManager.id,
        autoscalingPolicy: {
            maxReplicas: 5,
            minReplicas: 1,
            cooldownPeriod: 60,
            cpuUtilization: {
                target: 0.5,
            },
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    debian9 = gcp.compute.get_image(family="debian-11",
        project="debian-cloud")
    foobar_instance_template = gcp.compute.InstanceTemplate("foobar",
        name="my-instance-template",
        machine_type="e2-medium",
        can_ip_forward=False,
        tags=[
            "foo",
            "bar",
        ],
        disks=[gcp.compute.InstanceTemplateDiskArgs(
            source_image=debian9.id,
        )],
        network_interfaces=[gcp.compute.InstanceTemplateNetworkInterfaceArgs(
            network="default",
        )],
        metadata={
            "foo": "bar",
        },
        service_account=gcp.compute.InstanceTemplateServiceAccountArgs(
            scopes=[
                "userinfo-email",
                "compute-ro",
                "storage-ro",
            ],
        ))
    foobar_target_pool = gcp.compute.TargetPool("foobar", name="my-target-pool")
    foobar_instance_group_manager = gcp.compute.InstanceGroupManager("foobar",
        name="my-igm",
        zone="us-central1-f",
        versions=[gcp.compute.InstanceGroupManagerVersionArgs(
            instance_template=foobar_instance_template.id,
            name="primary",
        )],
        target_pools=[foobar_target_pool.id],
        base_instance_name="foobar")
    foobar = gcp.compute.Autoscaler("foobar",
        name="my-autoscaler",
        zone="us-central1-f",
        target=foobar_instance_group_manager.id,
        autoscaling_policy=gcp.compute.AutoscalerAutoscalingPolicyArgs(
            max_replicas=5,
            min_replicas=1,
            cooldown_period=60,
            cpu_utilization=gcp.compute.AutoscalerAutoscalingPolicyCpuUtilizationArgs(
                target=0.5,
            ),
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/compute"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		debian9, err := compute.LookupImage(ctx, &compute.LookupImageArgs{
    			Family:  pulumi.StringRef("debian-11"),
    			Project: pulumi.StringRef("debian-cloud"),
    		}, nil)
    		if err != nil {
    			return err
    		}
    		foobarInstanceTemplate, err := compute.NewInstanceTemplate(ctx, "foobar", &compute.InstanceTemplateArgs{
    			Name:         pulumi.String("my-instance-template"),
    			MachineType:  pulumi.String("e2-medium"),
    			CanIpForward: pulumi.Bool(false),
    			Tags: pulumi.StringArray{
    				pulumi.String("foo"),
    				pulumi.String("bar"),
    			},
    			Disks: compute.InstanceTemplateDiskArray{
    				&compute.InstanceTemplateDiskArgs{
    					SourceImage: pulumi.String(debian9.Id),
    				},
    			},
    			NetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{
    				&compute.InstanceTemplateNetworkInterfaceArgs{
    					Network: pulumi.String("default"),
    				},
    			},
    			Metadata: pulumi.Map{
    				"foo": pulumi.Any("bar"),
    			},
    			ServiceAccount: &compute.InstanceTemplateServiceAccountArgs{
    				Scopes: pulumi.StringArray{
    					pulumi.String("userinfo-email"),
    					pulumi.String("compute-ro"),
    					pulumi.String("storage-ro"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		foobarTargetPool, err := compute.NewTargetPool(ctx, "foobar", &compute.TargetPoolArgs{
    			Name: pulumi.String("my-target-pool"),
    		})
    		if err != nil {
    			return err
    		}
    		foobarInstanceGroupManager, err := compute.NewInstanceGroupManager(ctx, "foobar", &compute.InstanceGroupManagerArgs{
    			Name: pulumi.String("my-igm"),
    			Zone: pulumi.String("us-central1-f"),
    			Versions: compute.InstanceGroupManagerVersionArray{
    				&compute.InstanceGroupManagerVersionArgs{
    					InstanceTemplate: foobarInstanceTemplate.ID(),
    					Name:             pulumi.String("primary"),
    				},
    			},
    			TargetPools: pulumi.StringArray{
    				foobarTargetPool.ID(),
    			},
    			BaseInstanceName: pulumi.String("foobar"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = compute.NewAutoscaler(ctx, "foobar", &compute.AutoscalerArgs{
    			Name:   pulumi.String("my-autoscaler"),
    			Zone:   pulumi.String("us-central1-f"),
    			Target: foobarInstanceGroupManager.ID(),
    			AutoscalingPolicy: &compute.AutoscalerAutoscalingPolicyArgs{
    				MaxReplicas:    pulumi.Int(5),
    				MinReplicas:    pulumi.Int(1),
    				CooldownPeriod: pulumi.Int(60),
    				CpuUtilization: &compute.AutoscalerAutoscalingPolicyCpuUtilizationArgs{
    					Target: pulumi.Float64(0.5),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var debian9 = Gcp.Compute.GetImage.Invoke(new()
        {
            Family = "debian-11",
            Project = "debian-cloud",
        });
    
        var foobarInstanceTemplate = new Gcp.Compute.InstanceTemplate("foobar", new()
        {
            Name = "my-instance-template",
            MachineType = "e2-medium",
            CanIpForward = false,
            Tags = new[]
            {
                "foo",
                "bar",
            },
            Disks = new[]
            {
                new Gcp.Compute.Inputs.InstanceTemplateDiskArgs
                {
                    SourceImage = debian9.Apply(getImageResult => getImageResult.Id),
                },
            },
            NetworkInterfaces = new[]
            {
                new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs
                {
                    Network = "default",
                },
            },
            Metadata = 
            {
                { "foo", "bar" },
            },
            ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs
            {
                Scopes = new[]
                {
                    "userinfo-email",
                    "compute-ro",
                    "storage-ro",
                },
            },
        });
    
        var foobarTargetPool = new Gcp.Compute.TargetPool("foobar", new()
        {
            Name = "my-target-pool",
        });
    
        var foobarInstanceGroupManager = new Gcp.Compute.InstanceGroupManager("foobar", new()
        {
            Name = "my-igm",
            Zone = "us-central1-f",
            Versions = new[]
            {
                new Gcp.Compute.Inputs.InstanceGroupManagerVersionArgs
                {
                    InstanceTemplate = foobarInstanceTemplate.Id,
                    Name = "primary",
                },
            },
            TargetPools = new[]
            {
                foobarTargetPool.Id,
            },
            BaseInstanceName = "foobar",
        });
    
        var foobar = new Gcp.Compute.Autoscaler("foobar", new()
        {
            Name = "my-autoscaler",
            Zone = "us-central1-f",
            Target = foobarInstanceGroupManager.Id,
            AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
            {
                MaxReplicas = 5,
                MinReplicas = 1,
                CooldownPeriod = 60,
                CpuUtilization = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyCpuUtilizationArgs
                {
                    Target = 0.5,
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.compute.ComputeFunctions;
    import com.pulumi.gcp.compute.inputs.GetImageArgs;
    import com.pulumi.gcp.compute.InstanceTemplate;
    import com.pulumi.gcp.compute.InstanceTemplateArgs;
    import com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;
    import com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;
    import com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;
    import com.pulumi.gcp.compute.TargetPool;
    import com.pulumi.gcp.compute.TargetPoolArgs;
    import com.pulumi.gcp.compute.InstanceGroupManager;
    import com.pulumi.gcp.compute.InstanceGroupManagerArgs;
    import com.pulumi.gcp.compute.inputs.InstanceGroupManagerVersionArgs;
    import com.pulumi.gcp.compute.Autoscaler;
    import com.pulumi.gcp.compute.AutoscalerArgs;
    import com.pulumi.gcp.compute.inputs.AutoscalerAutoscalingPolicyArgs;
    import com.pulumi.gcp.compute.inputs.AutoscalerAutoscalingPolicyCpuUtilizationArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var debian9 = ComputeFunctions.getImage(GetImageArgs.builder()
                .family("debian-11")
                .project("debian-cloud")
                .build());
    
            var foobarInstanceTemplate = new InstanceTemplate("foobarInstanceTemplate", InstanceTemplateArgs.builder()
                .name("my-instance-template")
                .machineType("e2-medium")
                .canIpForward(false)
                .tags(            
                    "foo",
                    "bar")
                .disks(InstanceTemplateDiskArgs.builder()
                    .sourceImage(debian9.applyValue(getImageResult -> getImageResult.id()))
                    .build())
                .networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()
                    .network("default")
                    .build())
                .metadata(Map.of("foo", "bar"))
                .serviceAccount(InstanceTemplateServiceAccountArgs.builder()
                    .scopes(                
                        "userinfo-email",
                        "compute-ro",
                        "storage-ro")
                    .build())
                .build());
    
            var foobarTargetPool = new TargetPool("foobarTargetPool", TargetPoolArgs.builder()
                .name("my-target-pool")
                .build());
    
            var foobarInstanceGroupManager = new InstanceGroupManager("foobarInstanceGroupManager", InstanceGroupManagerArgs.builder()
                .name("my-igm")
                .zone("us-central1-f")
                .versions(InstanceGroupManagerVersionArgs.builder()
                    .instanceTemplate(foobarInstanceTemplate.id())
                    .name("primary")
                    .build())
                .targetPools(foobarTargetPool.id())
                .baseInstanceName("foobar")
                .build());
    
            var foobar = new Autoscaler("foobar", AutoscalerArgs.builder()
                .name("my-autoscaler")
                .zone("us-central1-f")
                .target(foobarInstanceGroupManager.id())
                .autoscalingPolicy(AutoscalerAutoscalingPolicyArgs.builder()
                    .maxReplicas(5)
                    .minReplicas(1)
                    .cooldownPeriod(60)
                    .cpuUtilization(AutoscalerAutoscalingPolicyCpuUtilizationArgs.builder()
                        .target(0.5)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      foobar:
        type: gcp:compute:Autoscaler
        properties:
          name: my-autoscaler
          zone: us-central1-f
          target: ${foobarInstanceGroupManager.id}
          autoscalingPolicy:
            maxReplicas: 5
            minReplicas: 1
            cooldownPeriod: 60
            cpuUtilization:
              target: 0.5
      foobarInstanceTemplate:
        type: gcp:compute:InstanceTemplate
        name: foobar
        properties:
          name: my-instance-template
          machineType: e2-medium
          canIpForward: false
          tags:
            - foo
            - bar
          disks:
            - sourceImage: ${debian9.id}
          networkInterfaces:
            - network: default
          metadata:
            foo: bar
          serviceAccount:
            scopes:
              - userinfo-email
              - compute-ro
              - storage-ro
      foobarTargetPool:
        type: gcp:compute:TargetPool
        name: foobar
        properties:
          name: my-target-pool
      foobarInstanceGroupManager:
        type: gcp:compute:InstanceGroupManager
        name: foobar
        properties:
          name: my-igm
          zone: us-central1-f
          versions:
            - instanceTemplate: ${foobarInstanceTemplate.id}
              name: primary
          targetPools:
            - ${foobarTargetPool.id}
          baseInstanceName: foobar
    variables:
      debian9:
        fn::invoke:
          Function: gcp:compute:getImage
          Arguments:
            family: debian-11
            project: debian-cloud
    

    Create Autoscaler Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Autoscaler(name: string, args: AutoscalerArgs, opts?: CustomResourceOptions);
    @overload
    def Autoscaler(resource_name: str,
                   args: AutoscalerArgs,
                   opts: Optional[ResourceOptions] = None)
    
    @overload
    def Autoscaler(resource_name: str,
                   opts: Optional[ResourceOptions] = None,
                   autoscaling_policy: Optional[AutoscalerAutoscalingPolicyArgs] = None,
                   target: Optional[str] = None,
                   description: Optional[str] = None,
                   name: Optional[str] = None,
                   project: Optional[str] = None,
                   zone: Optional[str] = None)
    func NewAutoscaler(ctx *Context, name string, args AutoscalerArgs, opts ...ResourceOption) (*Autoscaler, error)
    public Autoscaler(string name, AutoscalerArgs args, CustomResourceOptions? opts = null)
    public Autoscaler(String name, AutoscalerArgs args)
    public Autoscaler(String name, AutoscalerArgs args, CustomResourceOptions options)
    
    type: gcp:compute:Autoscaler
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args AutoscalerArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args AutoscalerArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args AutoscalerArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args AutoscalerArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args AutoscalerArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var autoscalerResource = new Gcp.Compute.Autoscaler("autoscalerResource", new()
    {
        AutoscalingPolicy = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyArgs
        {
            MaxReplicas = 0,
            MinReplicas = 0,
            CooldownPeriod = 0,
            CpuUtilization = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyCpuUtilizationArgs
            {
                Target = 0,
                PredictiveMethod = "string",
            },
            LoadBalancingUtilization = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs
            {
                Target = 0,
            },
            Metrics = new[]
            {
                new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyMetricArgs
                {
                    Name = "string",
                    Filter = "string",
                    SingleInstanceAssignment = 0,
                    Target = 0,
                    Type = "string",
                },
            },
            Mode = "string",
            ScaleDownControl = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScaleDownControlArgs
            {
                MaxScaledDownReplicas = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs
                {
                    Fixed = 0,
                    Percent = 0,
                },
                TimeWindowSec = 0,
            },
            ScaleInControl = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScaleInControlArgs
            {
                MaxScaledInReplicas = new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs
                {
                    Fixed = 0,
                    Percent = 0,
                },
                TimeWindowSec = 0,
            },
            ScalingSchedules = new[]
            {
                new Gcp.Compute.Inputs.AutoscalerAutoscalingPolicyScalingScheduleArgs
                {
                    DurationSec = 0,
                    MinRequiredReplicas = 0,
                    Name = "string",
                    Schedule = "string",
                    Description = "string",
                    Disabled = false,
                    TimeZone = "string",
                },
            },
        },
        Target = "string",
        Description = "string",
        Name = "string",
        Project = "string",
        Zone = "string",
    });
    
    example, err := compute.NewAutoscaler(ctx, "autoscalerResource", &compute.AutoscalerArgs{
    	AutoscalingPolicy: &compute.AutoscalerAutoscalingPolicyArgs{
    		MaxReplicas:    pulumi.Int(0),
    		MinReplicas:    pulumi.Int(0),
    		CooldownPeriod: pulumi.Int(0),
    		CpuUtilization: &compute.AutoscalerAutoscalingPolicyCpuUtilizationArgs{
    			Target:           pulumi.Float64(0),
    			PredictiveMethod: pulumi.String("string"),
    		},
    		LoadBalancingUtilization: &compute.AutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs{
    			Target: pulumi.Float64(0),
    		},
    		Metrics: compute.AutoscalerAutoscalingPolicyMetricArray{
    			&compute.AutoscalerAutoscalingPolicyMetricArgs{
    				Name:                     pulumi.String("string"),
    				Filter:                   pulumi.String("string"),
    				SingleInstanceAssignment: pulumi.Float64(0),
    				Target:                   pulumi.Float64(0),
    				Type:                     pulumi.String("string"),
    			},
    		},
    		Mode: pulumi.String("string"),
    		ScaleDownControl: &compute.AutoscalerAutoscalingPolicyScaleDownControlArgs{
    			MaxScaledDownReplicas: &compute.AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs{
    				Fixed:   pulumi.Int(0),
    				Percent: pulumi.Int(0),
    			},
    			TimeWindowSec: pulumi.Int(0),
    		},
    		ScaleInControl: &compute.AutoscalerAutoscalingPolicyScaleInControlArgs{
    			MaxScaledInReplicas: &compute.AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs{
    				Fixed:   pulumi.Int(0),
    				Percent: pulumi.Int(0),
    			},
    			TimeWindowSec: pulumi.Int(0),
    		},
    		ScalingSchedules: compute.AutoscalerAutoscalingPolicyScalingScheduleArray{
    			&compute.AutoscalerAutoscalingPolicyScalingScheduleArgs{
    				DurationSec:         pulumi.Int(0),
    				MinRequiredReplicas: pulumi.Int(0),
    				Name:                pulumi.String("string"),
    				Schedule:            pulumi.String("string"),
    				Description:         pulumi.String("string"),
    				Disabled:            pulumi.Bool(false),
    				TimeZone:            pulumi.String("string"),
    			},
    		},
    	},
    	Target:      pulumi.String("string"),
    	Description: pulumi.String("string"),
    	Name:        pulumi.String("string"),
    	Project:     pulumi.String("string"),
    	Zone:        pulumi.String("string"),
    })
    
    var autoscalerResource = new Autoscaler("autoscalerResource", AutoscalerArgs.builder()
        .autoscalingPolicy(AutoscalerAutoscalingPolicyArgs.builder()
            .maxReplicas(0)
            .minReplicas(0)
            .cooldownPeriod(0)
            .cpuUtilization(AutoscalerAutoscalingPolicyCpuUtilizationArgs.builder()
                .target(0)
                .predictiveMethod("string")
                .build())
            .loadBalancingUtilization(AutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs.builder()
                .target(0)
                .build())
            .metrics(AutoscalerAutoscalingPolicyMetricArgs.builder()
                .name("string")
                .filter("string")
                .singleInstanceAssignment(0)
                .target(0)
                .type("string")
                .build())
            .mode("string")
            .scaleDownControl(AutoscalerAutoscalingPolicyScaleDownControlArgs.builder()
                .maxScaledDownReplicas(AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs.builder()
                    .fixed(0)
                    .percent(0)
                    .build())
                .timeWindowSec(0)
                .build())
            .scaleInControl(AutoscalerAutoscalingPolicyScaleInControlArgs.builder()
                .maxScaledInReplicas(AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs.builder()
                    .fixed(0)
                    .percent(0)
                    .build())
                .timeWindowSec(0)
                .build())
            .scalingSchedules(AutoscalerAutoscalingPolicyScalingScheduleArgs.builder()
                .durationSec(0)
                .minRequiredReplicas(0)
                .name("string")
                .schedule("string")
                .description("string")
                .disabled(false)
                .timeZone("string")
                .build())
            .build())
        .target("string")
        .description("string")
        .name("string")
        .project("string")
        .zone("string")
        .build());
    
    autoscaler_resource = gcp.compute.Autoscaler("autoscalerResource",
        autoscaling_policy=gcp.compute.AutoscalerAutoscalingPolicyArgs(
            max_replicas=0,
            min_replicas=0,
            cooldown_period=0,
            cpu_utilization=gcp.compute.AutoscalerAutoscalingPolicyCpuUtilizationArgs(
                target=0,
                predictive_method="string",
            ),
            load_balancing_utilization=gcp.compute.AutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs(
                target=0,
            ),
            metrics=[gcp.compute.AutoscalerAutoscalingPolicyMetricArgs(
                name="string",
                filter="string",
                single_instance_assignment=0,
                target=0,
                type="string",
            )],
            mode="string",
            scale_down_control=gcp.compute.AutoscalerAutoscalingPolicyScaleDownControlArgs(
                max_scaled_down_replicas=gcp.compute.AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs(
                    fixed=0,
                    percent=0,
                ),
                time_window_sec=0,
            ),
            scale_in_control=gcp.compute.AutoscalerAutoscalingPolicyScaleInControlArgs(
                max_scaled_in_replicas=gcp.compute.AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs(
                    fixed=0,
                    percent=0,
                ),
                time_window_sec=0,
            ),
            scaling_schedules=[gcp.compute.AutoscalerAutoscalingPolicyScalingScheduleArgs(
                duration_sec=0,
                min_required_replicas=0,
                name="string",
                schedule="string",
                description="string",
                disabled=False,
                time_zone="string",
            )],
        ),
        target="string",
        description="string",
        name="string",
        project="string",
        zone="string")
    
    const autoscalerResource = new gcp.compute.Autoscaler("autoscalerResource", {
        autoscalingPolicy: {
            maxReplicas: 0,
            minReplicas: 0,
            cooldownPeriod: 0,
            cpuUtilization: {
                target: 0,
                predictiveMethod: "string",
            },
            loadBalancingUtilization: {
                target: 0,
            },
            metrics: [{
                name: "string",
                filter: "string",
                singleInstanceAssignment: 0,
                target: 0,
                type: "string",
            }],
            mode: "string",
            scaleDownControl: {
                maxScaledDownReplicas: {
                    fixed: 0,
                    percent: 0,
                },
                timeWindowSec: 0,
            },
            scaleInControl: {
                maxScaledInReplicas: {
                    fixed: 0,
                    percent: 0,
                },
                timeWindowSec: 0,
            },
            scalingSchedules: [{
                durationSec: 0,
                minRequiredReplicas: 0,
                name: "string",
                schedule: "string",
                description: "string",
                disabled: false,
                timeZone: "string",
            }],
        },
        target: "string",
        description: "string",
        name: "string",
        project: "string",
        zone: "string",
    });
    
    type: gcp:compute:Autoscaler
    properties:
        autoscalingPolicy:
            cooldownPeriod: 0
            cpuUtilization:
                predictiveMethod: string
                target: 0
            loadBalancingUtilization:
                target: 0
            maxReplicas: 0
            metrics:
                - filter: string
                  name: string
                  singleInstanceAssignment: 0
                  target: 0
                  type: string
            minReplicas: 0
            mode: string
            scaleDownControl:
                maxScaledDownReplicas:
                    fixed: 0
                    percent: 0
                timeWindowSec: 0
            scaleInControl:
                maxScaledInReplicas:
                    fixed: 0
                    percent: 0
                timeWindowSec: 0
            scalingSchedules:
                - description: string
                  disabled: false
                  durationSec: 0
                  minRequiredReplicas: 0
                  name: string
                  schedule: string
                  timeZone: string
        description: string
        name: string
        project: string
        target: string
        zone: string
    

    Autoscaler Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Autoscaler resource accepts the following input properties:

    AutoscalingPolicy AutoscalerAutoscalingPolicy
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    Target string
    URL of the managed instance group that this autoscaler will scale.
    Description string
    An optional description of this resource.
    Name string
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    Project string
    Zone string
    URL of the zone where the instance group resides.
    AutoscalingPolicy AutoscalerAutoscalingPolicyArgs
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    Target string
    URL of the managed instance group that this autoscaler will scale.
    Description string
    An optional description of this resource.
    Name string
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    Project string
    Zone string
    URL of the zone where the instance group resides.
    autoscalingPolicy AutoscalerAutoscalingPolicy
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    target String
    URL of the managed instance group that this autoscaler will scale.
    description String
    An optional description of this resource.
    name String
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    project String
    zone String
    URL of the zone where the instance group resides.
    autoscalingPolicy AutoscalerAutoscalingPolicy
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    target string
    URL of the managed instance group that this autoscaler will scale.
    description string
    An optional description of this resource.
    name string
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    project string
    zone string
    URL of the zone where the instance group resides.
    autoscaling_policy AutoscalerAutoscalingPolicyArgs
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    target str
    URL of the managed instance group that this autoscaler will scale.
    description str
    An optional description of this resource.
    name str
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    project str
    zone str
    URL of the zone where the instance group resides.
    autoscalingPolicy Property Map
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    target String
    URL of the managed instance group that this autoscaler will scale.
    description String
    An optional description of this resource.
    name String
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    project String
    zone String
    URL of the zone where the instance group resides.
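
    Because more than one policy can be defined at once, and Compute Engine's autoscaler acts on whichever policy recommends the most instances, the policies above can be combined on a single resource. The TypeScript sketch below is illustrative only; the target URL is a placeholder for an existing managed instance group.

    import * as gcp from "@pulumi/gcp";

    // Sketch: an autoscaling policy combining cpuUtilization and
    // loadBalancingUtilization. The autoscaler follows whichever signal
    // recommends the larger number of instances. The target URL below is
    // a placeholder for an existing managed instance group.
    const combined = new gcp.compute.Autoscaler("combined", {
        zone: "us-central1-f",
        target: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instanceGroupManagers/my-igm",
        autoscalingPolicy: {
            minReplicas: 1,
            maxReplicas: 5,
            cooldownPeriod: 60,
            cpuUtilization: {
                target: 0.6,
            },
            loadBalancingUtilization: {
                target: 0.8,
            },
        },
    });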

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Autoscaler resource produces the following output properties:

    CreationTimestamp string
    Creation timestamp in RFC3339 text format.
    Id string
    The provider-assigned unique ID for this managed resource.
    SelfLink string
    The URI of the created resource.
    CreationTimestamp string
    Creation timestamp in RFC3339 text format.
    Id string
    The provider-assigned unique ID for this managed resource.
    SelfLink string
    The URI of the created resource.
    creationTimestamp String
    Creation timestamp in RFC3339 text format.
    id String
    The provider-assigned unique ID for this managed resource.
    selfLink String
    The URI of the created resource.
    creationTimestamp string
    Creation timestamp in RFC3339 text format.
    id string
    The provider-assigned unique ID for this managed resource.
    selfLink string
    The URI of the created resource.
    creation_timestamp str
    Creation timestamp in RFC3339 text format.
    id str
    The provider-assigned unique ID for this managed resource.
    self_link str
    The URI of the created resource.
    creationTimestamp String
    Creation timestamp in RFC3339 text format.
    id String
    The provider-assigned unique ID for this managed resource.
    selfLink String
    The URI of the created resource.
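
    These output properties can be exported as stack outputs once the resource is declared. A minimal TypeScript sketch follows; the target URL is a placeholder for an existing managed instance group.

    import * as gcp from "@pulumi/gcp";

    // Sketch: exporting the Autoscaler's output properties. The target URL
    // below is a placeholder for an existing managed instance group.
    const example = new gcp.compute.Autoscaler("example", {
        zone: "us-central1-f",
        target: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instanceGroupManagers/my-igm",
        autoscalingPolicy: {
            minReplicas: 1,
            maxReplicas: 2,
        },
    });

    // id, selfLink, and creationTimestamp resolve after the resource is created.
    export const autoscalerSelfLink = example.selfLink;
    export const autoscalerCreatedAt = example.creationTimestamp;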

    Look up Existing Autoscaler Resource

    Get an existing Autoscaler resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: AutoscalerState, opts?: CustomResourceOptions): Autoscaler
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            autoscaling_policy: Optional[AutoscalerAutoscalingPolicyArgs] = None,
            creation_timestamp: Optional[str] = None,
            description: Optional[str] = None,
            name: Optional[str] = None,
            project: Optional[str] = None,
            self_link: Optional[str] = None,
            target: Optional[str] = None,
            zone: Optional[str] = None) -> Autoscaler
    func GetAutoscaler(ctx *Context, name string, id IDInput, state *AutoscalerState, opts ...ResourceOption) (*Autoscaler, error)
    public static Autoscaler Get(string name, Input<string> id, AutoscalerState? state, CustomResourceOptions? opts = null)
    public static Autoscaler get(String name, Output<String> id, AutoscalerState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AutoscalingPolicy AutoscalerAutoscalingPolicy
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    CreationTimestamp string
    Creation timestamp in RFC3339 text format.
    Description string
    An optional description of this resource.
    Name string
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    Project string
    SelfLink string
    The URI of the created resource.
    Target string
    URL of the managed instance group that this autoscaler will scale.
    Zone string
    URL of the zone where the instance group resides.
    AutoscalingPolicy AutoscalerAutoscalingPolicyArgs
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    CreationTimestamp string
    Creation timestamp in RFC3339 text format.
    Description string
    An optional description of this resource.
    Name string
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    Project string
    SelfLink string
    The URI of the created resource.
    Target string
    URL of the managed instance group that this autoscaler will scale.
    Zone string
    URL of the zone where the instance group resides.
    autoscalingPolicy AutoscalerAutoscalingPolicy
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    creationTimestamp String
    Creation timestamp in RFC3339 text format.
    description String
    An optional description of this resource.
    name String
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    project String
    selfLink String
    The URI of the created resource.
    target String
    URL of the managed instance group that this autoscaler will scale.
    zone String
    URL of the zone where the instance group resides.
    autoscalingPolicy AutoscalerAutoscalingPolicy
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    creationTimestamp string
    Creation timestamp in RFC3339 text format.
    description string
    An optional description of this resource.
    name string
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    project string
    selfLink string
    The URI of the created resource.
    target string
    URL of the managed instance group that this autoscaler will scale.
    zone string
    URL of the zone where the instance group resides.
    autoscaling_policy AutoscalerAutoscalingPolicyArgs
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    creation_timestamp str
    Creation timestamp in RFC3339 text format.
    description str
    An optional description of this resource.
    name str
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    project str
    self_link str
    The URI of the created resource.
    target str
    URL of the managed instance group that this autoscaler will scale.
    zone str
    URL of the zone where the instance group resides.
    autoscalingPolicy Property Map
    The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization. If none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%. Structure is documented below.
    creationTimestamp String
    Creation timestamp in RFC3339 text format.
    description String
    An optional description of this resource.
    name String
    Name of the resource. The name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?, which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
    project String
    selfLink String
    The URI of the created resource.
    target String
    URL of the managed instance group that this autoscaler will scale.
    zone String
    URL of the zone where the instance group resides.

    Supporting Types

    AutoscalerAutoscalingPolicy, AutoscalerAutoscalingPolicyArgs

    MaxReplicas int
    The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.
    MinReplicas int
    The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.
    CooldownPeriod int
    The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
    CpuUtilization AutoscalerAutoscalingPolicyCpuUtilization
    Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
    LoadBalancingUtilization AutoscalerAutoscalingPolicyLoadBalancingUtilization
    Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
    Metrics List<AutoscalerAutoscalingPolicyMetric>
    Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
    Mode string
    Defines operating mode for this policy.
    ScaleDownControl AutoscalerAutoscalingPolicyScaleDownControl
    Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    ScaleInControl AutoscalerAutoscalingPolicyScaleInControl
    Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    ScalingSchedules List<AutoscalerAutoscalingPolicyScalingSchedule>
    Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
    MaxReplicas int
    The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.
    MinReplicas int
    The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.
    CooldownPeriod int
    The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
    CpuUtilization AutoscalerAutoscalingPolicyCpuUtilization
    Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
    LoadBalancingUtilization AutoscalerAutoscalingPolicyLoadBalancingUtilization
    Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
    Metrics []AutoscalerAutoscalingPolicyMetric
    Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
    Mode string
    Defines operating mode for this policy.
    ScaleDownControl AutoscalerAutoscalingPolicyScaleDownControl
    Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    ScaleInControl AutoscalerAutoscalingPolicyScaleInControl
    Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    ScalingSchedules []AutoscalerAutoscalingPolicyScalingSchedule
    Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
    maxReplicas Integer
    The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.
    minReplicas Integer
    The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.
    cooldownPeriod Integer
    The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
    cpuUtilization AutoscalerAutoscalingPolicyCpuUtilization
    Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
    loadBalancingUtilization AutoscalerAutoscalingPolicyLoadBalancingUtilization
    Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
    metrics List<AutoscalerAutoscalingPolicyMetric>
    Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
    mode String
    Defines operating mode for this policy.
    scaleDownControl AutoscalerAutoscalingPolicyScaleDownControl
    Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    scaleInControl AutoscalerAutoscalingPolicyScaleInControl
    Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    scalingSchedules List<AutoscalerAutoscalingPolicyScalingSchedule>
    Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
    maxReplicas number
    The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.
    minReplicas number
    The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.
    cooldownPeriod number
    The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
    cpuUtilization AutoscalerAutoscalingPolicyCpuUtilization
    Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
    loadBalancingUtilization AutoscalerAutoscalingPolicyLoadBalancingUtilization
    Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
    metrics AutoscalerAutoscalingPolicyMetric[]
    Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
    mode string
    Defines operating mode for this policy.
    scaleDownControl AutoscalerAutoscalingPolicyScaleDownControl
    Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    scaleInControl AutoscalerAutoscalingPolicyScaleInControl
    Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    scalingSchedules AutoscalerAutoscalingPolicyScalingSchedule[]
    Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
    max_replicas int
    The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.
    min_replicas int
    The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.
    cooldown_period int
    The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
    cpu_utilization AutoscalerAutoscalingPolicyCpuUtilization
    Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
    load_balancing_utilization AutoscalerAutoscalingPolicyLoadBalancingUtilization
    Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
    metrics Sequence[AutoscalerAutoscalingPolicyMetric]
    Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
    mode str
    Defines operating mode for this policy.
    scale_down_control AutoscalerAutoscalingPolicyScaleDownControl
    Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    scale_in_control AutoscalerAutoscalingPolicyScaleInControl
    Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    scaling_schedules Sequence[AutoscalerAutoscalingPolicyScalingSchedule]
    Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
    maxReplicas Number
    The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than the minimum number of replicas.
    minReplicas Number
    The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, the autoscaler will choose a default value depending on the maximum number of instances allowed.
    cooldownPeriod Number
    The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.
    cpuUtilization Property Map
    Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group. Structure is documented below.
    loadBalancingUtilization Property Map
    Configuration parameters of autoscaling based on a load balancer. Structure is documented below.
    metrics List<Property Map>
    Configuration parameters of autoscaling based on a custom metric. Structure is documented below.
    mode String
    Defines operating mode for this policy.
    scaleDownControl Property Map
    Defines scale down controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    scaleInControl Property Map
    Defines scale in controls to reduce the risk of response latency and outages due to abrupt scale-in events. Structure is documented below.
    scalingSchedules List<Property Map>
    Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. Structure is documented below.
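
    As a concrete illustration, the TypeScript sketch below combines several of these policy fields on a single zonal autoscaler. The target instance group manager URL, zone, and numeric values are placeholders chosen for the example.

    import * as gcp from "@pulumi/gcp";

    const autoscaler = new gcp.compute.Autoscaler("policy-example", {
        name: "policy-example",
        zone: "us-central1-f",
        // Assumes a managed instance group named "my-igm" already exists in this zone.
        target: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instanceGroupManagers/my-igm",
        autoscalingPolicy: {
            minReplicas: 1,
            maxReplicas: 10,
            cooldownPeriod: 90, // wait 90s before reading metrics from a freshly created VM
            mode: "ON",         // the default; "OFF" keeps the policy attached but inactive
            cpuUtilization: {
                target: 0.7,    // keep average CPU utilization around 70%
            },
        },
    });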

    AutoscalerAutoscalingPolicyCpuUtilization, AutoscalerAutoscalingPolicyCpuUtilizationArgs

    Target double
    The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
    PredictiveMethod string
    Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

    • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
    • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
    Target float64
    The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
    PredictiveMethod string
    Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

    • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
    • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
    target Double
    The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
    predictiveMethod String
    Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

    • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
    • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
    target number
    The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
    predictiveMethod string
    Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

    • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
    • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
    target float
    The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
    predictive_method str
    Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

    • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
    • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
    target Number
    The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6. If the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization. If the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.
    predictiveMethod String
    Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are:

    • NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics.
    • OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.
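
    The fragment below shows both fields in TypeScript, typed against the provider's generated input types (gcp.types.input.compute is the usual layout of the @pulumi/gcp SDK). The replica counts and target are illustrative; a value like this is passed as the autoscalingPolicy argument of gcp.compute.Autoscaler.

    import * as gcp from "@pulumi/gcp";

    const cpuPolicy: gcp.types.input.compute.AutoscalerAutoscalingPolicy = {
        minReplicas: 2,
        maxReplicas: 20,
        cpuUtilization: {
            target: 0.6,                               // hold average CPU near 60%
            predictiveMethod: "OPTIMIZE_AVAILABILITY", // scale out ahead of forecast demand
        },
    };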

    AutoscalerAutoscalingPolicyLoadBalancingUtilization, AutoscalerAutoscalingPolicyLoadBalancingUtilizationArgs

    Target double
    Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
    Target float64
    Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
    target Double
    Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
    target number
    Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
    target float
    Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
    target Number
    Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.
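
    A hypothetical policy fragment using this field might look as follows; it is only meaningful when the instance group serves as a backend of an HTTP(S) load balancer with a configured capacity. Values are illustrative.

    import * as gcp from "@pulumi/gcp";

    const lbPolicy: gcp.types.input.compute.AutoscalerAutoscalingPolicy = {
        minReplicas: 2,
        maxReplicas: 15,
        loadBalancingUtilization: {
            target: 0.8, // aim for 80% of the backend's configured serving capacity
        },
    };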

    AutoscalerAutoscalingPolicyMetric, AutoscalerAutoscalingPolicyMetricArgs

    Name string
    The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
    Filter string
    A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
    SingleInstanceAssignment double
    If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.
    Target double
    The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
    Type string
    Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
    Name string
    The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
    Filter string
    A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
    SingleInstanceAssignment float64
    If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.
    Target float64
    The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
    Type string
    Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
    name String
    The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
    filter String
    A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
    singleInstanceAssignment Double
    If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.
    target Double
    The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
    type String
    Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
    name string
    The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
    filter string
    A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
    singleInstanceAssignment number
    If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.
    target number
    The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
    type string
    Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
    name str
    The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
    filter str
    A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
    single_instance_assignment float
    If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.
    target float
    The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
    type str
    Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
    name String
    The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values. The metric must have a value type of INT64 or DOUBLE.
    filter String
    A filter string to be used as the filter string for a Stackdriver Monitoring TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data. You can only use the AND operator for joining selectors. You can only use direct equality comparison operator (=) without any functions for each selector. You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels. If the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. This is called a per-group metric for the purpose of autoscaling. If not specified, the type defaults to gce_instance. You should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.
    singleInstanceAssignment Number
    If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. The autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing. For example, a good metric to use with the target is pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances. A bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.
    target Number
    The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric. For example, a good metric to use as a utilizationTarget is www.googleapis.com/compute/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.
    type String
    Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values are: GAUGE, DELTA_PER_SECOND, DELTA_PER_MINUTE.
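
    A per-group metric paired with singleInstanceAssignment (such as the Pub/Sub metric mentioned above) is one way to use this block; the sketch below shows the other style, a per-instance utilization metric with a target and a type, using the received-bytes metric named in the target description. The target value is illustrative.

    import * as gcp from "@pulumi/gcp";

    const metricPolicy: gcp.types.input.compute.AutoscalerAutoscalingPolicy = {
        minReplicas: 1,
        maxReplicas: 10,
        metrics: [{
            name: "www.googleapis.com/compute/instance/network/received_bytes_count",
            target: 50000000,         // bytes per minute each instance should handle
            type: "DELTA_PER_MINUTE", // treat the cumulative counter as a per-minute delta
        }],
    };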

    AutoscalerAutoscalingPolicyScaleDownControl, AutoscalerAutoscalingPolicyScaleDownControlArgs

    MaxScaledDownReplicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
    A nested object resource. Structure is documented below.
    TimeWindowSec int
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    MaxScaledDownReplicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
    A nested object resource. Structure is documented below.
    TimeWindowSec int
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    maxScaledDownReplicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
    A nested object resource. Structure is documented below.
    timeWindowSec Integer
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    maxScaledDownReplicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
    A nested object resource. Structure is documented below.
    timeWindowSec number
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    max_scaled_down_replicas AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas
    A nested object resource. Structure is documented below.
    time_window_sec int
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    maxScaledDownReplicas Property Map
    A nested object resource. Structure is documented below.
    timeWindowSec Number
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

    AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicas, AutoscalerAutoscalingPolicyScaleDownControlMaxScaledDownReplicasArgs

    Fixed int
    Specifies a fixed number of VM instances. This must be a positive integer.
    Percent int
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    Fixed int
    Specifies a fixed number of VM instances. This must be a positive integer.
    Percent int
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    fixed Integer
    Specifies a fixed number of VM instances. This must be a positive integer.
    percent Integer
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    fixed number
    Specifies a fixed number of VM instances. This must be a positive integer.
    percent number
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    fixed int
    Specifies a fixed number of VM instances. This must be a positive integer.
    percent int
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    fixed Number
    Specifies a fixed number of VM instances. This must be a positive integer.
    percent Number
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.

    AutoscalerAutoscalingPolicyScaleInControl, AutoscalerAutoscalingPolicyScaleInControlArgs

    MaxScaledInReplicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
    A nested object resource. Structure is documented below.
    TimeWindowSec int
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    MaxScaledInReplicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
    A nested object resource. Structure is documented below.
    TimeWindowSec int
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    maxScaledInReplicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
    A nested object resource. Structure is documented below.
    timeWindowSec Integer
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    maxScaledInReplicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
    A nested object resource. Structure is documented below.
    timeWindowSec number
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    max_scaled_in_replicas AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas
    A nested object resource. Structure is documented below.
    time_window_sec int
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.
    maxScaledInReplicas Property Map
    A nested object resource. Structure is documented below.
    timeWindowSec Number
    How long back autoscaling should look when computing recommendations to include directives regarding slower scale down, as described above.

    AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas, AutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasArgs

    Fixed int
    Specifies a fixed number of VM instances. This must be a positive integer.
    Percent int
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    Fixed int
    Specifies a fixed number of VM instances. This must be a positive integer.
    Percent int
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    fixed Integer
    Specifies a fixed number of VM instances. This must be a positive integer.
    percent Integer
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    fixed number
    Specifies a fixed number of VM instances. This must be a positive integer.
    percent number
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    fixed int
    Specifies a fixed number of VM instances. This must be a positive integer.
    percent int
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
    fixed Number
    Specifies a fixed number of VM instances. This must be a positive integer.
    percent Number
    Specifies a percentage of instances between 0 and 100%, inclusive. For example, specify 80 for 80%.
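
    For example, the fragment below caps scale-in at 20% of the current group size over any 10-minute window; all values are illustrative.

    import * as gcp from "@pulumi/gcp";

    const scaleInPolicy: gcp.types.input.compute.AutoscalerAutoscalingPolicy = {
        minReplicas: 3,
        maxReplicas: 30,
        cpuUtilization: { target: 0.65 },
        scaleInControl: {
            maxScaledInReplicas: {
                percent: 20,    // remove at most 20% of instances per window
            },
            timeWindowSec: 600, // consider the trailing 10 minutes of recommendations
        },
    };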

    AutoscalerAutoscalingPolicyScalingSchedule, AutoscalerAutoscalingPolicyScalingScheduleArgs

    DurationSec int
    The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
    MinRequiredReplicas int
    Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
    Name string
    The identifier for this object. Format specified above.
    Schedule string
    The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
    Description string
    An optional description of this resource.
    Disabled bool
    A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
    TimeZone string
    The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
    DurationSec int
    The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
    MinRequiredReplicas int
    Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
    Name string
    The identifier for this object. Format specified above.
    Schedule string
    The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
    Description string
    An optional description of this resource.
    Disabled bool
    A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
    TimeZone string
    The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
    durationSec Integer
    The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
    minRequiredReplicas Integer
    Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
    name String
    The identifier for this object. Format specified above.
    schedule String
    The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
    description String
    An optional description of this resource.
    disabled Boolean
    A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
    timeZone String
    The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
    durationSec number
    The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
    minRequiredReplicas number
    Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
    name string
    The identifier for this object. Format specified above.
    schedule string
    The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
    description string
    An optional description of this resource.
    disabled boolean
    A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
    timeZone string
    The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
    duration_sec int
    The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
    min_required_replicas int
    Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
    name str
    The identifier for this object. Format specified above.
    schedule str
    The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
    description str
    An optional description of this resource.
    disabled bool
    A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
    time_zone str
    The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
    durationSec Number
    The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.
    minRequiredReplicas Number
    Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.
    name String
    The identifier for this object. Format specified above.
    schedule String
    The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field).
    description String
    An optional description of this resource.
    disabled Boolean
    A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.
    timeZone String
    The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.
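
    A hypothetical schedule that guarantees extra capacity on weekday mornings could be expressed as follows; the schedule name, cron expression, and replica counts are illustrative.

    import * as gcp from "@pulumi/gcp";

    const scheduledPolicy: gcp.types.input.compute.AutoscalerAutoscalingPolicy = {
        minReplicas: 1,
        maxReplicas: 20,
        cpuUtilization: { target: 0.6 },
        scalingSchedules: [{
            name: "weekday-morning-rush",
            schedule: "0 8 * * 1-5",  // start at 08:00, Monday through Friday
            durationSec: 14400,       // keep the floor in place for 4 hours
            minRequiredReplicas: 10,  // require at least 10 VMs during each window
            timeZone: "America/New_York",
            description: "Guarantee capacity for the morning traffic peak",
        }],
    };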

    Import

    Autoscaler can be imported using any of these accepted formats:

    • projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}

    • {{project}}/{{zone}}/{{name}}

    • {{zone}}/{{name}}

    • {{name}}

    When using the pulumi import command, Autoscaler can be imported using one of the formats above. For example:

    $ pulumi import gcp:compute/autoscaler:Autoscaler default projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}
    
    $ pulumi import gcp:compute/autoscaler:Autoscaler default {{project}}/{{zone}}/{{name}}
    
    $ pulumi import gcp:compute/autoscaler:Autoscaler default {{zone}}/{{name}}
    
    $ pulumi import gcp:compute/autoscaler:Autoscaler default {{name}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.