1. Packages
  2. Google Cloud (GCP) Classic
  3. API Docs
  4. managedkafka
  5. Topic
Google Cloud Classic v7.29.0 published on Wednesday, Jun 26, 2024 by Pulumi

gcp.managedkafka.Topic

Explore with Pulumi AI

gcp logo
Google Cloud Classic v7.29.0 published on Wednesday, Jun 26, 2024 by Pulumi

    Example Usage

    Managedkafka Topic Basic

    import * as pulumi from "@pulumi/pulumi";
    import * as gcp from "@pulumi/gcp";
    
    const project = gcp.organizations.getProject({});
    const cluster = new gcp.managedkafka.Cluster("cluster", {
        clusterId: "my-cluster",
        location: "us-central1",
        capacityConfig: {
            vcpuCount: "3",
            memoryBytes: "3221225472",
        },
        gcpConfig: {
            accessConfig: {
                networkConfigs: [{
                    subnet: project.then(project => `projects/${project.number}/regions/us-central1/subnetworks/default`),
                }],
            },
        },
    });
    const example = new gcp.managedkafka.Topic("example", {
        topicId: "my-topic",
        cluster: cluster.clusterId,
        location: "us-central1",
        partitionCount: 2,
        replicationFactor: 3,
        configs: {
            "cleanup.policy": "compact",
        },
    });
    
    import pulumi
    import pulumi_gcp as gcp
    
    project = gcp.organizations.get_project()
    cluster = gcp.managedkafka.Cluster("cluster",
        cluster_id="my-cluster",
        location="us-central1",
        capacity_config=gcp.managedkafka.ClusterCapacityConfigArgs(
            vcpu_count="3",
            memory_bytes="3221225472",
        ),
        gcp_config=gcp.managedkafka.ClusterGcpConfigArgs(
            access_config=gcp.managedkafka.ClusterGcpConfigAccessConfigArgs(
                network_configs=[gcp.managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArgs(
                    subnet=f"projects/{project.number}/regions/us-central1/subnetworks/default",
                )],
            ),
        ))
    example = gcp.managedkafka.Topic("example",
        topic_id="my-topic",
        cluster=cluster.cluster_id,
        location="us-central1",
        partition_count=2,
        replication_factor=3,
        configs={
            "cleanup.policy": "compact",
        })
    
    package main
    
    import (
    	"fmt"
    
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/managedkafka"
    	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/organizations"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		project, err := organizations.LookupProject(ctx, nil, nil)
    		if err != nil {
    			return err
    		}
    		cluster, err := managedkafka.NewCluster(ctx, "cluster", &managedkafka.ClusterArgs{
    			ClusterId: pulumi.String("my-cluster"),
    			Location:  pulumi.String("us-central1"),
    			CapacityConfig: &managedkafka.ClusterCapacityConfigArgs{
    				VcpuCount:   pulumi.String("3"),
    				MemoryBytes: pulumi.String("3221225472"),
    			},
    			GcpConfig: &managedkafka.ClusterGcpConfigArgs{
    				AccessConfig: &managedkafka.ClusterGcpConfigAccessConfigArgs{
    					NetworkConfigs: managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArray{
    						&managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArgs{
    							Subnet: pulumi.String(fmt.Sprintf("projects/%v/regions/us-central1/subnetworks/default", project.Number)),
    						},
    					},
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = managedkafka.NewTopic(ctx, "example", &managedkafka.TopicArgs{
    			TopicId:           pulumi.String("my-topic"),
    			Cluster:           cluster.ClusterId,
    			Location:          pulumi.String("us-central1"),
    			PartitionCount:    pulumi.Int(2),
    			ReplicationFactor: pulumi.Int(3),
    			Configs: pulumi.StringMap{
    				"cleanup.policy": pulumi.String("compact"),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Gcp = Pulumi.Gcp;
    
    return await Deployment.RunAsync(() => 
    {
        var project = Gcp.Organizations.GetProject.Invoke();
    
        var cluster = new Gcp.ManagedKafka.Cluster("cluster", new()
        {
            ClusterId = "my-cluster",
            Location = "us-central1",
            CapacityConfig = new Gcp.ManagedKafka.Inputs.ClusterCapacityConfigArgs
            {
                VcpuCount = "3",
                MemoryBytes = "3221225472",
            },
            GcpConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigArgs
            {
                AccessConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigArgs
                {
                    NetworkConfigs = new[]
                    {
                        new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs
                        {
                            Subnet = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}/regions/us-central1/subnetworks/default",
                        },
                    },
                },
            },
        });
    
        var example = new Gcp.ManagedKafka.Topic("example", new()
        {
            TopicId = "my-topic",
            Cluster = cluster.ClusterId,
            Location = "us-central1",
            PartitionCount = 2,
            ReplicationFactor = 3,
            Configs = 
            {
                { "cleanup.policy", "compact" },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.gcp.organizations.OrganizationsFunctions;
    import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
    import com.pulumi.gcp.managedkafka.Cluster;
    import com.pulumi.gcp.managedkafka.ClusterArgs;
    import com.pulumi.gcp.managedkafka.inputs.ClusterCapacityConfigArgs;
    import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigArgs;
    import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigArgs;
    import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs;
    import com.pulumi.gcp.managedkafka.Topic;
    import com.pulumi.gcp.managedkafka.TopicArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            final var project = OrganizationsFunctions.getProject();
    
            var cluster = new Cluster("cluster", ClusterArgs.builder()
                .clusterId("my-cluster")
                .location("us-central1")
                .capacityConfig(ClusterCapacityConfigArgs.builder()
                .vcpuCount("3")
                .memoryBytes("3221225472")
                    .build())
                .gcpConfig(ClusterGcpConfigArgs.builder()
                    .accessConfig(ClusterGcpConfigAccessConfigArgs.builder()
                        .networkConfigs(ClusterGcpConfigAccessConfigNetworkConfigArgs.builder()
                            .subnet(String.format("projects/%s/regions/us-central1/subnetworks/default", project.applyValue(getProjectResult -> getProjectResult.number())))
                            .build())
                        .build())
                    .build())
                .build());
    
            var example = new Topic("example", TopicArgs.builder()
                .topicId("my-topic")
                .cluster(cluster.clusterId())
                .location("us-central1")
                .partitionCount(2)
                .replicationFactor(3)
                .configs(Map.of("cleanup.policy", "compact"))
                .build());
    
        }
    }
    
    resources:
      cluster:
        type: gcp:managedkafka:Cluster
        properties:
          clusterId: my-cluster
          location: us-central1
          capacityConfig:
            vcpuCount: 3
            memoryBytes: 3221225472
          gcpConfig:
            accessConfig:
              networkConfigs:
                - subnet: projects/${project.number}/regions/us-central1/subnetworks/default
      example:
        type: gcp:managedkafka:Topic
        properties:
          topicId: my-topic
          cluster: ${cluster.clusterId}
          location: us-central1
          partitionCount: 2
          replicationFactor: 3
          configs:
            cleanup.policy: compact
    variables:
      project:
        fn::invoke:
          Function: gcp:organizations:getProject
          Arguments: {}
    

    Create Topic Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Topic(name: string, args: TopicArgs, opts?: CustomResourceOptions);
    @overload
    def Topic(resource_name: str,
              args: TopicArgs,
              opts: Optional[ResourceOptions] = None)
    
    @overload
    def Topic(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              cluster: Optional[str] = None,
              location: Optional[str] = None,
              replication_factor: Optional[int] = None,
              topic_id: Optional[str] = None,
              configs: Optional[Mapping[str, str]] = None,
              partition_count: Optional[int] = None,
              project: Optional[str] = None)
    func NewTopic(ctx *Context, name string, args TopicArgs, opts ...ResourceOption) (*Topic, error)
    public Topic(string name, TopicArgs args, CustomResourceOptions? opts = null)
    public Topic(String name, TopicArgs args)
    public Topic(String name, TopicArgs args, CustomResourceOptions options)
    
    type: gcp:managedkafka:Topic
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args TopicArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var topicResource = new Gcp.ManagedKafka.Topic("topicResource", new()
    {
        Cluster = "string",
        Location = "string",
        ReplicationFactor = 0,
        TopicId = "string",
        Configs = 
        {
            { "string", "string" },
        },
        PartitionCount = 0,
        Project = "string",
    });
    
    example, err := managedkafka.NewTopic(ctx, "topicResource", &managedkafka.TopicArgs{
    	Cluster:           pulumi.String("string"),
    	Location:          pulumi.String("string"),
    	ReplicationFactor: pulumi.Int(0),
    	TopicId:           pulumi.String("string"),
    	Configs: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	PartitionCount: pulumi.Int(0),
    	Project:        pulumi.String("string"),
    })
    
    var topicResource = new Topic("topicResource", TopicArgs.builder()
        .cluster("string")
        .location("string")
        .replicationFactor(0)
        .topicId("string")
        .configs(Map.of("string", "string"))
        .partitionCount(0)
        .project("string")
        .build());
    
    topic_resource = gcp.managedkafka.Topic("topicResource",
        cluster="string",
        location="string",
        replication_factor=0,
        topic_id="string",
        configs={
            "string": "string",
        },
        partition_count=0,
        project="string")
    
    const topicResource = new gcp.managedkafka.Topic("topicResource", {
        cluster: "string",
        location: "string",
        replicationFactor: 0,
        topicId: "string",
        configs: {
            string: "string",
        },
        partitionCount: 0,
        project: "string",
    });
    
    type: gcp:managedkafka:Topic
    properties:
        cluster: string
        configs:
            string: string
        location: string
        partitionCount: 0
        project: string
        replicationFactor: 0
        topicId: string
    

    Topic Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Topic resource accepts the following input properties:

    Cluster string
    The cluster name.
    Location string
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    ReplicationFactor int
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    TopicId string
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    Configs Dictionary<string, string>
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    PartitionCount int
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    Cluster string
    The cluster name.
    Location string
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    ReplicationFactor int
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    TopicId string
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    Configs map[string]string
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    PartitionCount int
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    cluster String
    The cluster name.
    location String
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    replicationFactor Integer
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    topicId String
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    configs Map<String,String>
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    partitionCount Integer
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    cluster string
    The cluster name.
    location string
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    replicationFactor number
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    topicId string
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    configs {[key: string]: string}
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    partitionCount number
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    cluster str
    The cluster name.
    location str
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    replication_factor int
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    topic_id str
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    configs Mapping[str, str]
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    partition_count int
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    cluster String
    The cluster name.
    location String
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    replicationFactor Number
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    topicId String
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    configs Map<String>
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    partitionCount Number
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Topic resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    Id string
    The provider-assigned unique ID for this managed resource.
    Name string
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    id string
    The provider-assigned unique ID for this managed resource.
    name string
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    id str
    The provider-assigned unique ID for this managed resource.
    name str
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    id String
    The provider-assigned unique ID for this managed resource.
    name String
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.

    Look up Existing Topic Resource

    Get an existing Topic resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: TopicState, opts?: CustomResourceOptions): Topic
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            cluster: Optional[str] = None,
            configs: Optional[Mapping[str, str]] = None,
            location: Optional[str] = None,
            name: Optional[str] = None,
            partition_count: Optional[int] = None,
            project: Optional[str] = None,
            replication_factor: Optional[int] = None,
            topic_id: Optional[str] = None) -> Topic
    func GetTopic(ctx *Context, name string, id IDInput, state *TopicState, opts ...ResourceOption) (*Topic, error)
    public static Topic Get(string name, Input<string> id, TopicState? state, CustomResourceOptions? opts = null)
    public static Topic get(String name, Output<String> id, TopicState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    Cluster string
    The cluster name.
    Configs Dictionary<string, string>
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    Location string
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    Name string
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    PartitionCount int
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    ReplicationFactor int
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    TopicId string
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    Cluster string
    The cluster name.
    Configs map[string]string
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    Location string
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    Name string
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    PartitionCount int
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    Project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    ReplicationFactor int
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    TopicId string
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    cluster String
    The cluster name.
    configs Map<String,String>
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    location String
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    name String
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    partitionCount Integer
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    replicationFactor Integer
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    topicId String
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    cluster string
    The cluster name.
    configs {[key: string]: string}
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    location string
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    name string
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    partitionCount number
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    project string
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    replicationFactor number
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    topicId string
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    cluster str
    The cluster name.
    configs Mapping[str, str]
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    location str
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    name str
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    partition_count int
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    project str
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    replication_factor int
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    topic_id str
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    cluster String
    The cluster name.
    configs Map<String>
    Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: cleanup.policy=compact, compression.type=producer.
    location String
    ID of the location of the Apache Kafka for BigQuery resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
    name String
    The name of the topic. The topic segment is used when connecting directly to the cluster. Must be in the format projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID.
    partitionCount Number
    The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
    project String
    The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
    replicationFactor Number
    The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
    topicId String
    The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: my-topic-name.


    Import

    Topic can be imported using any of these accepted formats:

    • projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics/{{topic_id}}

    • {{project}}/{{location}}/{{cluster}}/{{topic_id}}

    • {{location}}/{{cluster}}/{{topic_id}}

    When using the pulumi import command, Topic can be imported using one of the formats above. For example:

    $ pulumi import gcp:managedkafka/topic:Topic default projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics/{{topic_id}}
    
    $ pulumi import gcp:managedkafka/topic:Topic default {{project}}/{{location}}/{{cluster}}/{{topic_id}}
    
    $ pulumi import gcp:managedkafka/topic:Topic default {{location}}/{{cluster}}/{{topic_id}}
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Google Cloud (GCP) Classic pulumi/pulumi-gcp
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the google-beta Terraform Provider.
    gcp logo
    Google Cloud Classic v7.29.0 published on Wednesday, Jun 26, 2024 by Pulumi