yandex.MdbClickhouseCluster
Manages a ClickHouse cluster within Yandex.Cloud. For more information, see the official documentation.
Example Usage
Example of creating a single-node ClickHouse cluster.
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
    public MyStack()
    {
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.5.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
        {
            Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
            {
                Config = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigArgs
                {
                    BackgroundPoolSize = 16,
                    BackgroundSchedulePoolSize = 16,
                    Compressions = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigCompressionArgs
                        {
                            Method = "LZ4",
                            MinPartSize = 1024,
                            MinPartSizeRatio = 0.5,
                        },
                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigCompressionArgs
                        {
                            Method = "ZSTD",
                            MinPartSize = 2048,
                            MinPartSizeRatio = 0.7,
                        },
                    },
                    GeobaseUri = "",
                    GraphiteRollups = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
                        {
                            Name = "rollup1",
                            Patterns = 
                            {
                                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
                                {
                                    Function = "func1",
                                    Regexp = "abc",
                                    Retentions = 
                                    {
                                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
                                        {
                                            Age = 1000,
                                            Precision = 3,
                                        },
                                    },
                                },
                            },
                        },
                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
                        {
                            Name = "rollup2",
                            Patterns = 
                            {
                                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
                                {
                                    Function = "func2",
                                    Retentions = 
                                    {
                                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
                                        {
                                            Age = 2000,
                                            Precision = 5,
                                        },
                                    },
                                },
                            },
                        },
                    },
                    Kafka = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaArgs
                    {
                        SaslMechanism = "SASL_MECHANISM_GSSAPI",
                        SaslPassword = "pass1",
                        SaslUsername = "user1",
                        SecurityProtocol = "SECURITY_PROTOCOL_PLAINTEXT",
                    },
                    KafkaTopics = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
                        {
                            Name = "topic1",
                            Settings = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
                            {
                                SaslMechanism = "SASL_MECHANISM_SCRAM_SHA_256",
                                SaslPassword = "pass2",
                                SaslUsername = "user2",
                                SecurityProtocol = "SECURITY_PROTOCOL_SSL",
                            },
                        },
                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
                        {
                            Name = "topic2",
                            Settings = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
                            {
                                SaslMechanism = "SASL_MECHANISM_PLAIN",
                                SecurityProtocol = "SECURITY_PROTOCOL_SASL_PLAINTEXT",
                            },
                        },
                    },
                    KeepAliveTimeout = 3000,
                    LogLevel = "TRACE",
                    MarkCacheSize = 5368709120,
                    MaxConcurrentQueries = 50,
                    MaxConnections = 100,
                    MaxPartitionSizeToDrop = 53687091200,
                    MaxTableSizeToDrop = 53687091200,
                    MergeTree = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigMergeTreeArgs
                    {
                        MaxBytesToMergeAtMinSpaceInPool = 1048576,
                        MaxReplicatedMergesInQueue = 16,
                        NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge = 8,
                        PartsToDelayInsert = 150,
                        PartsToThrowInsert = 300,
                        ReplicatedDeduplicationWindow = 100,
                        ReplicatedDeduplicationWindowSeconds = 604800,
                    },
                    MetricLogEnabled = true,
                    MetricLogRetentionSize = 536870912,
                    MetricLogRetentionTime = 2592000,
                    PartLogRetentionSize = 536870912,
                    PartLogRetentionTime = 2592000,
                    QueryLogRetentionSize = 1073741824,
                    QueryLogRetentionTime = 2592000,
                    QueryThreadLogEnabled = true,
                    QueryThreadLogRetentionSize = 536870912,
                    QueryThreadLogRetentionTime = 2592000,
                    Rabbitmq = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigRabbitmqArgs
                    {
                        Password = "rabbit_pass",
                        Username = "rabbit_user",
                    },
                    TextLogEnabled = true,
                    TextLogLevel = "TRACE",
                    TextLogRetentionSize = 536870912,
                    TextLogRetentionTime = 2592000,
                    Timezone = "UTC",
                    TraceLogEnabled = true,
                    TraceLogRetentionSize = 536870912,
                    TraceLogRetentionTime = 2592000,
                    UncompressedCacheSize = 8589934592,
                },
                Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
                {
                    DiskSize = 32,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
            CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
            {
                Enabled = false,
            },
            Databases = 
            {
                new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
                {
                    Name = "db_name",
                },
            },
            Environment = "PRESTABLE",
            FormatSchemas = 
            {
                new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
                {
                    Name = "test_schema",
                    Type = "FORMAT_SCHEMA_TYPE_CAPNPROTO",
                    Uri = "https://storage.yandexcloud.net/ch-data/schema.proto",
                },
            },
            Hosts = 
            {
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = fooVpcSubnet.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-a",
                },
            },
            MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
            {
                Type = "ANYTIME",
            },
            MlModels = 
            {
                new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
                {
                    Name = "test_model",
                    Type = "ML_MODEL_TYPE_CATBOOST",
                    Uri = "https://storage.yandexcloud.net/ch-data/train.csv",
                },
            },
            NetworkId = fooVpcNetwork.Id,
            ServiceAccountId = "your_service_account_id",
            Users = 
            {
                new Yandex.Inputs.MdbClickhouseClusterUserArgs
                {
                    Name = "user",
                    Password = "your_password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                        {
                            DatabaseName = "db_name",
                        },
                    },
                    Quotas = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 1000,
                            IntervalDuration = 3600000,
                            Queries = 10000,
                        },
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 5000,
                            IntervalDuration = 79800000,
                            Queries = 50000,
                        },
                    },
                    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
                    {
                        MaxMemoryUsageForUser = 1000000000,
                        OutputFormatJsonQuote64bitIntegers = true,
                        ReadOverflowMode = "throw",
                    },
                },
            },
        });
    }
}
package main
import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.5.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
			Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
				Config: &yandex.MdbClickhouseClusterClickhouseConfigArgs{
					BackgroundPoolSize:         pulumi.Int(16),
					BackgroundSchedulePoolSize: pulumi.Int(16),
					Compressions: yandex.MdbClickhouseClusterClickhouseConfigCompressionArray{
						&yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs{
							Method:           pulumi.String("LZ4"),
							MinPartSize:      pulumi.Int(1024),
							MinPartSizeRatio: pulumi.Float64(0.5),
						},
						&yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs{
							Method:           pulumi.String("ZSTD"),
							MinPartSize:      pulumi.Int(2048),
							MinPartSizeRatio: pulumi.Float64(0.7),
						},
					},
					GeobaseUri: pulumi.String(""),
					GraphiteRollups: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArray{
						&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs{
							Name: pulumi.String("rollup1"),
							Patterns: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArray{
								&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs{
									Function: pulumi.String("func1"),
									Regexp:   pulumi.String("abc"),
									Retentions: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArray{
										&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs{
											Age:       pulumi.Int(1000),
											Precision: pulumi.Int(3),
										},
									},
								},
							},
						},
						&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs{
							Name: pulumi.String("rollup2"),
							Patterns: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArray{
								&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs{
									Function: pulumi.String("func2"),
									Retentions: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArray{
										&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs{
											Age:       pulumi.Int(2000),
											Precision: pulumi.Int(5),
										},
									},
								},
							},
						},
					},
					Kafka: &yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs{
						SaslMechanism:    pulumi.String("SASL_MECHANISM_GSSAPI"),
						SaslPassword:     pulumi.String("pass1"),
						SaslUsername:     pulumi.String("user1"),
						SecurityProtocol: pulumi.String("SECURITY_PROTOCOL_PLAINTEXT"),
					},
					KafkaTopics: yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArray{
						&yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs{
							Name: pulumi.String("topic1"),
							Settings: &yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs{
								SaslMechanism:    pulumi.String("SASL_MECHANISM_SCRAM_SHA_256"),
								SaslPassword:     pulumi.String("pass2"),
								SaslUsername:     pulumi.String("user2"),
								SecurityProtocol: pulumi.String("SECURITY_PROTOCOL_SSL"),
							},
						},
						&yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs{
							Name: pulumi.String("topic2"),
							Settings: &yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs{
								SaslMechanism:    pulumi.String("SASL_MECHANISM_PLAIN"),
								SecurityProtocol: pulumi.String("SECURITY_PROTOCOL_SASL_PLAINTEXT"),
							},
						},
					},
					KeepAliveTimeout:       pulumi.Int(3000),
					LogLevel:               pulumi.String("TRACE"),
					MarkCacheSize:          pulumi.Int(5368709120),
					MaxConcurrentQueries:   pulumi.Int(50),
					MaxConnections:         pulumi.Int(100),
					MaxPartitionSizeToDrop: pulumi.Int(53687091200),
					MaxTableSizeToDrop:     pulumi.Int(53687091200),
					MergeTree: &yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs{
						MaxBytesToMergeAtMinSpaceInPool:                pulumi.Int(1048576),
						MaxReplicatedMergesInQueue:                     pulumi.Int(16),
						NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: pulumi.Int(8),
						PartsToDelayInsert:                             pulumi.Int(150),
						PartsToThrowInsert:                             pulumi.Int(300),
						ReplicatedDeduplicationWindow:                  pulumi.Int(100),
						ReplicatedDeduplicationWindowSeconds:           pulumi.Int(604800),
					},
					MetricLogEnabled:            pulumi.Bool(true),
					MetricLogRetentionSize:      pulumi.Int(536870912),
					MetricLogRetentionTime:      pulumi.Int(2592000),
					PartLogRetentionSize:        pulumi.Int(536870912),
					PartLogRetentionTime:        pulumi.Int(2592000),
					QueryLogRetentionSize:       pulumi.Int(1073741824),
					QueryLogRetentionTime:       pulumi.Int(2592000),
					QueryThreadLogEnabled:       pulumi.Bool(true),
					QueryThreadLogRetentionSize: pulumi.Int(536870912),
					QueryThreadLogRetentionTime: pulumi.Int(2592000),
					Rabbitmq: &yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs{
						Password: pulumi.String("rabbit_pass"),
						Username: pulumi.String("rabbit_user"),
					},
					TextLogEnabled:        pulumi.Bool(true),
					TextLogLevel:          pulumi.String("TRACE"),
					TextLogRetentionSize:  pulumi.Int(536870912),
					TextLogRetentionTime:  pulumi.Int(2592000),
					Timezone:              pulumi.String("UTC"),
					TraceLogEnabled:       pulumi.Bool(true),
					TraceLogRetentionSize: pulumi.Int(536870912),
					TraceLogRetentionTime: pulumi.Int(2592000),
					UncompressedCacheSize: pulumi.Int(8589934592),
				},
				Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
					DiskSize:         pulumi.Int(32),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
			CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
				Enabled: pulumi.Bool(false),
			},
			Databases: yandex.MdbClickhouseClusterDatabaseArray{
				&yandex.MdbClickhouseClusterDatabaseArgs{
					Name: pulumi.String("db_name"),
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			FormatSchemas: yandex.MdbClickhouseClusterFormatSchemaArray{
				&yandex.MdbClickhouseClusterFormatSchemaArgs{
					Name: pulumi.String("test_schema"),
					Type: pulumi.String("FORMAT_SCHEMA_TYPE_CAPNPROTO"),
					Uri:  pulumi.String("https://storage.yandexcloud.net/ch-data/schema.proto"),
				},
			},
			Hosts: yandex.MdbClickhouseClusterHostArray{
				&yandex.MdbClickhouseClusterHostArgs{
					SubnetId: fooVpcSubnet.ID(),
					Type:     pulumi.String("CLICKHOUSE"),
					Zone:     pulumi.String("ru-central1-a"),
				},
			},
			MaintenanceWindow: &yandex.MdbClickhouseClusterMaintenanceWindowArgs{
				Type: pulumi.String("ANYTIME"),
			},
			MlModels: yandex.MdbClickhouseClusterMlModelArray{
				&yandex.MdbClickhouseClusterMlModelArgs{
					Name: pulumi.String("test_model"),
					Type: pulumi.String("ML_MODEL_TYPE_CATBOOST"),
					Uri:  pulumi.String("https://storage.yandexcloud.net/ch-data/train.csv"),
				},
			},
			NetworkId:        fooVpcNetwork.ID(),
			ServiceAccountId: pulumi.String("your_service_account_id"),
			Users: yandex.MdbClickhouseClusterUserArray{
				&yandex.MdbClickhouseClusterUserArgs{
					Name:     pulumi.String("user"),
					Password: pulumi.String("your_password"),
					Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
						&yandex.MdbClickhouseClusterUserPermissionArgs{
							DatabaseName: pulumi.String("db_name"),
						},
					},
					Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
						&yandex.MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(1000),
							IntervalDuration: pulumi.Int(3600000),
							Queries:          pulumi.Int(10000),
						},
						&yandex.MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(5000),
							IntervalDuration: pulumi.Int(79800000),
							Queries:          pulumi.Int(50000),
						},
					},
					Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
						MaxMemoryUsageForUser:              pulumi.Int(1000000000),
						OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
						ReadOverflowMode:                   pulumi.String("throw"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.5.0.0/24"],
    zone: "ru-central1-a",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
    clickhouse: {
        config: {
            backgroundPoolSize: 16,
            backgroundSchedulePoolSize: 16,
            compressions: [
                {
                    method: "LZ4",
                    minPartSize: 1024,
                    minPartSizeRatio: 0.5,
                },
                {
                    method: "ZSTD",
                    minPartSize: 2048,
                    minPartSizeRatio: 0.7,
                },
            ],
            geobaseUri: "",
            graphiteRollups: [
                {
                    name: "rollup1",
                    patterns: [{
                        function: "func1",
                        regexp: "abc",
                        retentions: [{
                            age: 1000,
                            precision: 3,
                        }],
                    }],
                },
                {
                    name: "rollup2",
                    patterns: [{
                        function: "func2",
                        retentions: [{
                            age: 2000,
                            precision: 5,
                        }],
                    }],
                },
            ],
            kafka: {
                saslMechanism: "SASL_MECHANISM_GSSAPI",
                saslPassword: "pass1",
                saslUsername: "user1",
                securityProtocol: "SECURITY_PROTOCOL_PLAINTEXT",
            },
            kafkaTopics: [
                {
                    name: "topic1",
                    settings: {
                        saslMechanism: "SASL_MECHANISM_SCRAM_SHA_256",
                        saslPassword: "pass2",
                        saslUsername: "user2",
                        securityProtocol: "SECURITY_PROTOCOL_SSL",
                    },
                },
                {
                    name: "topic2",
                    settings: {
                        saslMechanism: "SASL_MECHANISM_PLAIN",
                        securityProtocol: "SECURITY_PROTOCOL_SASL_PLAINTEXT",
                    },
                },
            ],
            keepAliveTimeout: 3000,
            logLevel: "TRACE",
            markCacheSize: 5368709120,
            maxConcurrentQueries: 50,
            maxConnections: 100,
            maxPartitionSizeToDrop: 53687091200,
            maxTableSizeToDrop: 53687091200,
            mergeTree: {
                maxBytesToMergeAtMinSpaceInPool: 1048576,
                maxReplicatedMergesInQueue: 16,
                numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 8,
                partsToDelayInsert: 150,
                partsToThrowInsert: 300,
                replicatedDeduplicationWindow: 100,
                replicatedDeduplicationWindowSeconds: 604800,
            },
            metricLogEnabled: true,
            metricLogRetentionSize: 536870912,
            metricLogRetentionTime: 2592000,
            partLogRetentionSize: 536870912,
            partLogRetentionTime: 2592000,
            queryLogRetentionSize: 1073741824,
            queryLogRetentionTime: 2592000,
            queryThreadLogEnabled: true,
            queryThreadLogRetentionSize: 536870912,
            queryThreadLogRetentionTime: 2592000,
            rabbitmq: {
                password: "rabbit_pass",
                username: "rabbit_user",
            },
            textLogEnabled: true,
            textLogLevel: "TRACE",
            textLogRetentionSize: 536870912,
            textLogRetentionTime: 2592000,
            timezone: "UTC",
            traceLogEnabled: true,
            traceLogRetentionSize: 536870912,
            traceLogRetentionTime: 2592000,
            uncompressedCacheSize: 8589934592,
        },
        resources: {
            diskSize: 32,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
    cloudStorage: {
        enabled: false,
    },
    databases: [{
        name: "db_name",
    }],
    environment: "PRESTABLE",
    formatSchemas: [{
        name: "test_schema",
        type: "FORMAT_SCHEMA_TYPE_CAPNPROTO",
        uri: "https://storage.yandexcloud.net/ch-data/schema.proto",
    }],
    hosts: [{
        subnetId: fooVpcSubnet.id,
        type: "CLICKHOUSE",
        zone: "ru-central1-a",
    }],
    maintenanceWindow: {
        type: "ANYTIME",
    },
    mlModels: [{
        name: "test_model",
        type: "ML_MODEL_TYPE_CATBOOST",
        uri: "https://storage.yandexcloud.net/ch-data/train.csv",
    }],
    networkId: fooVpcNetwork.id,
    serviceAccountId: "your_service_account_id",
    users: [{
        name: "user",
        password: "your_password",
        permissions: [{
            databaseName: "db_name",
        }],
        quotas: [
            {
                errors: 1000,
                intervalDuration: 3600000,
                queries: 10000,
            },
            {
                errors: 5000,
                intervalDuration: 79800000,
                queries: 50000,
            },
        ],
        settings: {
            maxMemoryUsageForUser: 1000000000,
            outputFormatJsonQuote64bitIntegers: true,
            readOverflowMode: "throw",
        },
    }],
});
import pulumi
import pulumi_yandex as yandex
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.5.0.0/24"],
    zone="ru-central1-a")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        config=yandex.MdbClickhouseClusterClickhouseConfigArgs(
            background_pool_size=16,
            background_schedule_pool_size=16,
            compressions=[
                yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs(
                    method="LZ4",
                    min_part_size=1024,
                    min_part_size_ratio=0.5,
                ),
                yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs(
                    method="ZSTD",
                    min_part_size=2048,
                    min_part_size_ratio=0.7,
                ),
            ],
            geobase_uri="",
            graphite_rollups=[
                yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs(
                    name="rollup1",
                    patterns=[yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs(
                        function="func1",
                        regexp="abc",
                        retentions=[yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs(
                            age=1000,
                            precision=3,
                        )],
                    )],
                ),
                yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs(
                    name="rollup2",
                    patterns=[yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs(
                        function="func2",
                        retentions=[yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs(
                            age=2000,
                            precision=5,
                        )],
                    )],
                ),
            ],
            kafka=yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs(
                sasl_mechanism="SASL_MECHANISM_GSSAPI",
                sasl_password="pass1",
                sasl_username="user1",
                security_protocol="SECURITY_PROTOCOL_PLAINTEXT",
            ),
            kafka_topics=[
                yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs(
                    name="topic1",
                    settings=yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs(
                        sasl_mechanism="SASL_MECHANISM_SCRAM_SHA_256",
                        sasl_password="pass2",
                        sasl_username="user2",
                        security_protocol="SECURITY_PROTOCOL_SSL",
                    ),
                ),
                yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs(
                    name="topic2",
                    settings=yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs(
                        sasl_mechanism="SASL_MECHANISM_PLAIN",
                        security_protocol="SECURITY_PROTOCOL_SASL_PLAINTEXT",
                    ),
                ),
            ],
            keep_alive_timeout=3000,
            log_level="TRACE",
            mark_cache_size=5368709120,
            max_concurrent_queries=50,
            max_connections=100,
            max_partition_size_to_drop=53687091200,
            max_table_size_to_drop=53687091200,
            merge_tree=yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs(
                max_bytes_to_merge_at_min_space_in_pool=1048576,
                max_replicated_merges_in_queue=16,
                number_of_free_entries_in_pool_to_lower_max_size_of_merge=8,
                parts_to_delay_insert=150,
                parts_to_throw_insert=300,
                replicated_deduplication_window=100,
                replicated_deduplication_window_seconds=604800,
            ),
            metric_log_enabled=True,
            metric_log_retention_size=536870912,
            metric_log_retention_time=2592000,
            part_log_retention_size=536870912,
            part_log_retention_time=2592000,
            query_log_retention_size=1073741824,
            query_log_retention_time=2592000,
            query_thread_log_enabled=True,
            query_thread_log_retention_size=536870912,
            query_thread_log_retention_time=2592000,
            rabbitmq=yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs(
                password="rabbit_pass",
                username="rabbit_user",
            ),
            text_log_enabled=True,
            text_log_level="TRACE",
            text_log_retention_size=536870912,
            text_log_retention_time=2592000,
            timezone="UTC",
            trace_log_enabled=True,
            trace_log_retention_size=536870912,
            trace_log_retention_time=2592000,
            uncompressed_cache_size=8589934592,
        ),
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            disk_size=32,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ),
    cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
        enabled=False,
    ),
    databases=[yandex.MdbClickhouseClusterDatabaseArgs(
        name="db_name",
    )],
    environment="PRESTABLE",
    format_schemas=[yandex.MdbClickhouseClusterFormatSchemaArgs(
        name="test_schema",
        type="FORMAT_SCHEMA_TYPE_CAPNPROTO",
        uri="https://storage.yandexcloud.net/ch-data/schema.proto",
    )],
    hosts=[yandex.MdbClickhouseClusterHostArgs(
        subnet_id=foo_vpc_subnet.id,
        type="CLICKHOUSE",
        zone="ru-central1-a",
    )],
    maintenance_window=yandex.MdbClickhouseClusterMaintenanceWindowArgs(
        type="ANYTIME",
    ),
    ml_models=[yandex.MdbClickhouseClusterMlModelArgs(
        name="test_model",
        type="ML_MODEL_TYPE_CATBOOST",
        uri="https://storage.yandexcloud.net/ch-data/train.csv",
    )],
    network_id=foo_vpc_network.id,
    service_account_id="your_service_account_id",
    users=[yandex.MdbClickhouseClusterUserArgs(
        name="user",
        password="your_password",
        permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
            database_name="db_name",
        )],
        quotas=[
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=1000,
                interval_duration=3600000,
                queries=10000,
            ),
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=5000,
                interval_duration=79800000,
                queries=50000,
            ),
        ],
        settings=yandex.MdbClickhouseClusterUserSettingsArgs(
            max_memory_usage_for_user=1000000000,
            output_format_json_quote64bit_integers=True,
            read_overflow_mode="throw",
        ),
    )])
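Once the cluster is created, its outputs can be exported from the stack. A minimal TypeScript sketch, continuing from the TypeScript example above; the fqdn attribute on host entries is an assumption based on the provider's host schema:
// Continues the TypeScript example above, where `fooMdbClickhouseCluster`
// is the created yandex.MdbClickhouseCluster resource.
// The cluster ID is always available as an output.
export const clusterId = fooMdbClickhouseCluster.id;
// Each host entry carries a computed `fqdn` once provisioning finishes
// (assumption based on the provider's host schema).
export const firstHostFqdn = fooMdbClickhouseCluster.hosts.apply(hosts => hosts[0].fqdn);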
Example of creating a high-availability (HA) ClickHouse cluster.
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
    public MyStack()
    {
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.1.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.2.0.0/24",
            },
            Zone = "ru-central1-b",
        });
        var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.3.0.0/24",
            },
            Zone = "ru-central1-c",
        });
        var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
        {
            Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
                {
                    DiskSize = 16,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
            CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
            {
                Enabled = false,
            },
            Databases = 
            {
                new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
                {
                    Name = "db_name",
                },
            },
            Environment = "PRESTABLE",
            Hosts = 
            {
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = fooVpcSubnet.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-a",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = bar.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-b",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = fooVpcSubnet.Id,
                    Type = "ZOOKEEPER",
                    Zone = "ru-central1-a",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = bar.Id,
                    Type = "ZOOKEEPER",
                    Zone = "ru-central1-b",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = baz.Id,
                    Type = "ZOOKEEPER",
                    Zone = "ru-central1-c",
                },
            },
            NetworkId = fooVpcNetwork.Id,
            Users = 
            {
                new Yandex.Inputs.MdbClickhouseClusterUserArgs
                {
                    Name = "user",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                        {
                            DatabaseName = "db_name",
                        },
                    },
                    Quotas = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 1000,
                            IntervalDuration = 3600000,
                            Queries = 10000,
                        },
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 5000,
                            IntervalDuration = 79800000,
                            Queries = 50000,
                        },
                    },
                    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
                    {
                        MaxMemoryUsageForUser = 1000000000,
                        OutputFormatJsonQuote64bitIntegers = true,
                        ReadOverflowMode = "throw",
                    },
                },
            },
            Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
                {
                    DiskSize = 10,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
        });
    }
}
package main
import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.1.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.2.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-b"),
		})
		if err != nil {
			return err
		}
		baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.3.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-c"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
			Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
				Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
					DiskSize:         pulumi.Int(16),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
			CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
				Enabled: pulumi.Bool(false),
			},
			Databases: yandex.MdbClickhouseClusterDatabaseArray{
				&yandex.MdbClickhouseClusterDatabaseArgs{
					Name: pulumi.String("db_name"),
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			Hosts: yandex.MdbClickhouseClusterHostArray{
				&yandex.MdbClickhouseClusterHostArgs{
					SubnetId: fooVpcSubnet.ID(),
					Type:     pulumi.String("CLICKHOUSE"),
					Zone:     pulumi.String("ru-central1-a"),
				},
				&yandex.MdbClickhouseClusterHostArgs{
					SubnetId: bar.ID(),
					Type:     pulumi.String("CLICKHOUSE"),
					Zone:     pulumi.String("ru-central1-b"),
				},
				&yandex.MdbClickhouseClusterHostArgs{
					SubnetId: fooVpcSubnet.ID(),
					Type:     pulumi.String("ZOOKEEPER"),
					Zone:     pulumi.String("ru-central1-a"),
				},
				&yandex.MdbClickhouseClusterHostArgs{
					SubnetId: bar.ID(),
					Type:     pulumi.String("ZOOKEEPER"),
					Zone:     pulumi.String("ru-central1-b"),
				},
				&yandex.MdbClickhouseClusterHostArgs{
					SubnetId: baz.ID(),
					Type:     pulumi.String("ZOOKEEPER"),
					Zone:     pulumi.String("ru-central1-c"),
				},
			},
			NetworkId: fooVpcNetwork.ID(),
			Users: yandex.MdbClickhouseClusterUserArray{
				&yandex.MdbClickhouseClusterUserArgs{
					Name:     pulumi.String("user"),
					Password: pulumi.String("password"),
					Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
						&yandex.MdbClickhouseClusterUserPermissionArgs{
							DatabaseName: pulumi.String("db_name"),
						},
					},
					Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
						&yandex.MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(1000),
							IntervalDuration: pulumi.Int(3600000),
							Queries:          pulumi.Int(10000),
						},
						&yandex.MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(5000),
							IntervalDuration: pulumi.Int(79800000),
							Queries:          pulumi.Int(50000),
						},
					},
					Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
						MaxMemoryUsageForUser:              pulumi.Int(1000000000),
						OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
						ReadOverflowMode:                   pulumi.String("throw"),
					},
				},
			},
			Zookeeper: &yandex.MdbClickhouseClusterZookeeperArgs{
				Resources: &yandex.MdbClickhouseClusterZookeeperResourcesArgs{
					DiskSize:         pulumi.Int(10),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.1.0.0/24"],
    zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.2.0.0/24"],
    zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.3.0.0/24"],
    zone: "ru-central1-c",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
    clickhouse: {
        resources: {
            diskSize: 16,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
    cloudStorage: {
        enabled: false,
    },
    databases: [{
        name: "db_name",
    }],
    environment: "PRESTABLE",
    hosts: [
        {
            subnetId: fooVpcSubnet.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-a",
        },
        {
            subnetId: bar.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-b",
        },
        {
            subnetId: fooVpcSubnet.id,
            type: "ZOOKEEPER",
            zone: "ru-central1-a",
        },
        {
            subnetId: bar.id,
            type: "ZOOKEEPER",
            zone: "ru-central1-b",
        },
        {
            subnetId: baz.id,
            type: "ZOOKEEPER",
            zone: "ru-central1-c",
        },
    ],
    networkId: fooVpcNetwork.id,
    users: [{
        name: "user",
        password: "password",
        permissions: [{
            databaseName: "db_name",
        }],
        quotas: [
            {
                errors: 1000,
                intervalDuration: 3600000,
                queries: 10000,
            },
            {
                errors: 5000,
                intervalDuration: 79800000,
                queries: 50000,
            },
        ],
        settings: {
            maxMemoryUsageForUser: 1000000000,
            outputFormatJsonQuote64bitIntegers: true,
            readOverflowMode: "throw",
        },
    }],
    zookeeper: {
        resources: {
            diskSize: 10,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
});
import pulumi
import pulumi_yandex as yandex
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.1.0.0/24"],
    zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.2.0.0/24"],
    zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.3.0.0/24"],
    zone="ru-central1-c")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            disk_size=16,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ),
    cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
        enabled=False,
    ),
    databases=[yandex.MdbClickhouseClusterDatabaseArgs(
        name="db_name",
    )],
    environment="PRESTABLE",
    hosts=[
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=foo_vpc_subnet.id,
            type="CLICKHOUSE",
            zone="ru-central1-a",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=bar.id,
            type="CLICKHOUSE",
            zone="ru-central1-b",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=foo_vpc_subnet.id,
            type="ZOOKEEPER",
            zone="ru-central1-a",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=bar.id,
            type="ZOOKEEPER",
            zone="ru-central1-b",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=baz.id,
            type="ZOOKEEPER",
            zone="ru-central1-c",
        ),
    ],
    network_id=foo_vpc_network.id,
    users=[yandex.MdbClickhouseClusterUserArgs(
        name="user",
        password="password",
        permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
            database_name="db_name",
        )],
        quotas=[
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=1000,
                interval_duration=3600000,
                queries=10000,
            ),
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=5000,
                interval_duration=79800000,
                queries=50000,
            ),
        ],
        settings=yandex.MdbClickhouseClusterUserSettingsArgs(
            max_memory_usage_for_user=1000000000,
            output_format_json_quote64bit_integers=True,
            read_overflow_mode="throw",
        ),
    )],
    zookeeper=yandex.MdbClickhouseClusterZookeeperArgs(
        resources=yandex.MdbClickhouseClusterZookeeperResourcesArgs(
            disk_size=10,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ))
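The examples above pass user passwords as plain literals for brevity. In a real stack you would typically read them from Pulumi configuration as secrets instead; a minimal TypeScript sketch (the chPassword config key is illustrative):
import * as pulumi from "@pulumi/pulumi";

// Set the value once with: pulumi config set --secret chPassword <value>
const config = new pulumi.Config();
const chPassword = config.requireSecret("chPassword");

// Pass the secret output in place of a literal in the user definition:
// users: [{ name: "user", password: chPassword, permissions: [...] }]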
Example of creating a sharded ClickHouse cluster.
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
    public MyStack()
    {
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.1.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.2.0.0/24",
            },
            Zone = "ru-central1-b",
        });
        var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.3.0.0/24",
            },
            Zone = "ru-central1-c",
        });
        var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
        {
            Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
                {
                    DiskSize = 16,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
            CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
            {
                Enabled = false,
            },
            Databases = 
            {
                new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
                {
                    Name = "db_name",
                },
            },
            Environment = "PRODUCTION",
            Hosts = 
            {
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    ShardName = "shard1",
                    SubnetId = fooVpcSubnet.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-a",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    ShardName = "shard1",
                    SubnetId = bar.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-b",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    ShardName = "shard2",
                    SubnetId = bar.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-b",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    ShardName = "shard2",
                    SubnetId = baz.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-c",
                },
            },
            NetworkId = fooVpcNetwork.Id,
            ShardGroups = 
            {
                new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
                {
                    Description = "Cluster configuration that contains only shard1",
                    Name = "single_shard_group",
                    ShardNames = 
                    {
                        "shard1",
                    },
                },
            },
            Users = 
            {
                new Yandex.Inputs.MdbClickhouseClusterUserArgs
                {
                    Name = "user",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                        {
                            DatabaseName = "db_name",
                        },
                    },
                    Quotas = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 1000,
                            IntervalDuration = 3600000,
                            Queries = 10000,
                        },
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 5000,
                            IntervalDuration = 79800000,
                            Queries = 50000,
                        },
                    },
                    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
                    {
                        MaxMemoryUsageForUser = 1000000000,
                        OutputFormatJsonQuote64bitIntegers = true,
                        ReadOverflowMode = "throw",
                    },
                },
            },
            Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
                {
                    DiskSize = 10,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
        });
    }
}
package main
import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.1.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.2.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-b"),
		})
		if err != nil {
			return err
		}
		baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.3.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-c"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
			Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
				Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
					DiskSize:         pulumi.Int(16),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
			CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
				Enabled: pulumi.Bool(false),
			},
			Databases: yandex.MdbClickhouseClusterDatabaseArray{
				&yandex.MdbClickhouseClusterDatabaseArgs{
					Name: pulumi.String("db_name"),
				},
			},
			Environment: pulumi.String("PRODUCTION"),
			Hosts: yandex.MdbClickhouseClusterHostArray{
				&yandex.MdbClickhouseClusterHostArgs{
					ShardName: pulumi.String("shard1"),
					SubnetId:  fooVpcSubnet.ID(),
					Type:      pulumi.String("CLICKHOUSE"),
					Zone:      pulumi.String("ru-central1-a"),
				},
				&yandex.MdbClickhouseClusterHostArgs{
					ShardName: pulumi.String("shard1"),
					SubnetId:  bar.ID(),
					Type:      pulumi.String("CLICKHOUSE"),
					Zone:      pulumi.String("ru-central1-b"),
				},
				&yandex.MdbClickhouseClusterHostArgs{
					ShardName: pulumi.String("shard2"),
					SubnetId:  bar.ID(),
					Type:      pulumi.String("CLICKHOUSE"),
					Zone:      pulumi.String("ru-central1-b"),
				},
				&yandex.MdbClickhouseClusterHostArgs{
					ShardName: pulumi.String("shard2"),
					SubnetId:  baz.ID(),
					Type:      pulumi.String("CLICKHOUSE"),
					Zone:      pulumi.String("ru-central1-c"),
				},
			},
			NetworkId: fooVpcNetwork.ID(),
			ShardGroups: yandex.MdbClickhouseClusterShardGroupArray{
				&yandex.MdbClickhouseClusterShardGroupArgs{
					Description: pulumi.String("Cluster configuration that contains only shard1"),
					Name:        pulumi.String("single_shard_group"),
					ShardNames: pulumi.StringArray{
						pulumi.String("shard1"),
					},
				},
			},
			Users: yandex.MdbClickhouseClusterUserArray{
				&yandex.MdbClickhouseClusterUserArgs{
					Name:     pulumi.String("user"),
					Password: pulumi.String("password"),
					Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
						&yandex.MdbClickhouseClusterUserPermissionArgs{
							DatabaseName: pulumi.String("db_name"),
						},
					},
					Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
						&yandex.MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(1000),
							IntervalDuration: pulumi.Int(3600000),
							Queries:          pulumi.Int(10000),
						},
						&yandex.MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(5000),
							IntervalDuration: pulumi.Int(79800000),
							Queries:          pulumi.Int(50000),
						},
					},
					Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
						MaxMemoryUsageForUser:              pulumi.Int(1000000000),
						OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
						ReadOverflowMode:                   pulumi.String("throw"),
					},
				},
			},
			Zookeeper: &yandex.MdbClickhouseClusterZookeeperArgs{
				Resources: &yandex.MdbClickhouseClusterZookeeperResourcesArgs{
					DiskSize:         pulumi.Int(10),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.1.0.0/24"],
    zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.2.0.0/24"],
    zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.3.0.0/24"],
    zone: "ru-central1-c",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
    clickhouse: {
        resources: {
            diskSize: 16,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
    cloudStorage: {
        enabled: false,
    },
    databases: [{
        name: "db_name",
    }],
    environment: "PRODUCTION",
    hosts: [
        {
            shardName: "shard1",
            subnetId: fooVpcSubnet.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-a",
        },
        {
            shardName: "shard1",
            subnetId: bar.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-b",
        },
        {
            shardName: "shard2",
            subnetId: bar.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-b",
        },
        {
            shardName: "shard2",
            subnetId: baz.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-c",
        },
    ],
    networkId: fooVpcNetwork.id,
    shardGroups: [{
        description: "Cluster configuration that contains only shard1",
        name: "single_shard_group",
        shardNames: ["shard1"],
    }],
    users: [{
        name: "user",
        password: "password",
        permissions: [{
            databaseName: "db_name",
        }],
        quotas: [
            {
                errors: 1000,
                intervalDuration: 3600000,
                queries: 10000,
            },
            {
                errors: 5000,
                intervalDuration: 79800000,
                queries: 50000,
            },
        ],
        settings: {
            maxMemoryUsageForUser: 1000000000,
            outputFormatJsonQuote64bitIntegers: true,
            readOverflowMode: "throw",
        },
    }],
    zookeeper: {
        resources: {
            diskSize: 10,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
});
import pulumi
import pulumi_yandex as yandex
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.1.0.0/24"],
    zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.2.0.0/24"],
    zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.3.0.0/24"],
    zone="ru-central1-c")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            disk_size=16,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ),
    cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
        enabled=False,
    ),
    databases=[yandex.MdbClickhouseClusterDatabaseArgs(
        name="db_name",
    )],
    environment="PRODUCTION",
    hosts=[
        yandex.MdbClickhouseClusterHostArgs(
            shard_name="shard1",
            subnet_id=foo_vpc_subnet.id,
            type="CLICKHOUSE",
            zone="ru-central1-a",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            shard_name="shard1",
            subnet_id=bar.id,
            type="CLICKHOUSE",
            zone="ru-central1-b",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            shard_name="shard2",
            subnet_id=bar.id,
            type="CLICKHOUSE",
            zone="ru-central1-b",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            shard_name="shard2",
            subnet_id=baz.id,
            type="CLICKHOUSE",
            zone="ru-central1-c",
        ),
    ],
    network_id=foo_vpc_network.id,
    shard_groups=[yandex.MdbClickhouseClusterShardGroupArgs(
        description="Cluster configuration that contain only shard1",
        name="single_shard_group",
        shard_names=["shard1"],
    )],
    users=[yandex.MdbClickhouseClusterUserArgs(
        name="user",
        password="password",
        permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
            database_name="db_name",
        )],
        quotas=[
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=1000,
                interval_duration=3600000,
                queries=10000,
            ),
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=5000,
                interval_duration=79800000,
                queries=50000,
            ),
        ],
        settings=yandex.MdbClickhouseClusterUserSettingsArgs(
            max_memory_usage_for_user=1000000000,
            output_format_json_quote64bit_integers=True,
            read_overflow_mode="throw",
        ),
    )],
    zookeeper=yandex.MdbClickhouseClusterZookeeperArgs(
        resources=yandex.MdbClickhouseClusterZookeeperResourcesArgs(
            disk_size=10,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ))
Create MdbClickhouseCluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MdbClickhouseCluster(name: string, args: MdbClickhouseClusterArgs, opts?: CustomResourceOptions);
@overload
def MdbClickhouseCluster(resource_name: str,
                         args: MdbClickhouseClusterArgs,
                         opts: Optional[ResourceOptions] = None)
@overload
def MdbClickhouseCluster(resource_name: str,
                         opts: Optional[ResourceOptions] = None,
                         environment: Optional[str] = None,
                         network_id: Optional[str] = None,
                         hosts: Optional[Sequence[MdbClickhouseClusterHostArgs]] = None,
                         clickhouse: Optional[MdbClickhouseClusterClickhouseArgs] = None,
                         databases: Optional[Sequence[MdbClickhouseClusterDatabaseArgs]] = None,
                         ml_models: Optional[Sequence[MdbClickhouseClusterMlModelArgs]] = None,
                         access: Optional[MdbClickhouseClusterAccessArgs] = None,
                         deletion_protection: Optional[bool] = None,
                         description: Optional[str] = None,
                         cloud_storage: Optional[MdbClickhouseClusterCloudStorageArgs] = None,
                         folder_id: Optional[str] = None,
                         format_schemas: Optional[Sequence[MdbClickhouseClusterFormatSchemaArgs]] = None,
                         backup_window_start: Optional[MdbClickhouseClusterBackupWindowStartArgs] = None,
                         labels: Optional[Mapping[str, str]] = None,
                         maintenance_window: Optional[MdbClickhouseClusterMaintenanceWindowArgs] = None,
                         copy_schema_on_new_hosts: Optional[bool] = None,
                         name: Optional[str] = None,
                         admin_password: Optional[str] = None,
                         security_group_ids: Optional[Sequence[str]] = None,
                         service_account_id: Optional[str] = None,
                         shard_groups: Optional[Sequence[MdbClickhouseClusterShardGroupArgs]] = None,
                         sql_database_management: Optional[bool] = None,
                         sql_user_management: Optional[bool] = None,
                         users: Optional[Sequence[MdbClickhouseClusterUserArgs]] = None,
                         version: Optional[str] = None,
                         zookeeper: Optional[MdbClickhouseClusterZookeeperArgs] = None)
func NewMdbClickhouseCluster(ctx *Context, name string, args MdbClickhouseClusterArgs, opts ...ResourceOption) (*MdbClickhouseCluster, error)
public MdbClickhouseCluster(string name, MdbClickhouseClusterArgs args, CustomResourceOptions? opts = null)
public MdbClickhouseCluster(String name, MdbClickhouseClusterArgs args)
public MdbClickhouseCluster(String name, MdbClickhouseClusterArgs args, CustomResourceOptions options)
type: yandex:MdbClickhouseCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MdbClickhouseClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
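Before the placeholder reference below, a minimal TypeScript constructor call may help tie the signatures and parameters above together. This is a sketch only: it assumes environment, networkId, hosts, and clickhouse are the required inputs (the provider schema is authoritative) and uses the protect option purely to illustrate the opts parameter:
import * as yandex from "@pulumi/yandex";
const minimal = new yandex.MdbClickhouseCluster("minimal", {
    environment: "PRODUCTION",
    networkId: "REPLACE_WITH_NETWORK_ID",   // placeholder network ID
    hosts: [{
        type: "CLICKHOUSE",
        zone: "ru-central1-a",
        subnetId: "REPLACE_WITH_SUBNET_ID", // placeholder subnet ID
    }],
    clickhouse: {
        resources: {
            diskSize: 16,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
}, { protect: true });                      // opts: guard against accidental deletion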
Constructor example
The following reference example uses placeholder values for all input properties.
var mdbClickhouseClusterResource = new Yandex.MdbClickhouseCluster("mdbClickhouseClusterResource", new()
{
    Environment = "string",
    NetworkId = "string",
    Hosts = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterHostArgs
        {
            Type = "string",
            Zone = "string",
            AssignPublicIp = false,
            Fqdn = "string",
            ShardName = "string",
            SubnetId = "string",
        },
    },
    Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
    {
        Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
        {
            DiskSize = 0,
            DiskTypeId = "string",
            ResourcePresetId = "string",
        },
        Config = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigArgs
        {
            BackgroundPoolSize = 0,
            BackgroundSchedulePoolSize = 0,
            Compressions = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigCompressionArgs
                {
                    Method = "string",
                    MinPartSize = 0,
                    MinPartSizeRatio = 0,
                },
            },
            GeobaseUri = "string",
            GraphiteRollups = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
                {
                    Name = "string",
                    Patterns = new[]
                    {
                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
                        {
                            Function = "string",
                            Regexp = "string",
                            Retentions = new[]
                            {
                                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
                                {
                                    Age = 0,
                                    Precision = 0,
                                },
                            },
                        },
                    },
                },
            },
            Kafka = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaArgs
            {
                SaslMechanism = "string",
                SaslPassword = "string",
                SaslUsername = "string",
                SecurityProtocol = "string",
            },
            KafkaTopics = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
                {
                    Name = "string",
                    Settings = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
                    {
                        SaslMechanism = "string",
                        SaslPassword = "string",
                        SaslUsername = "string",
                        SecurityProtocol = "string",
                    },
                },
            },
            KeepAliveTimeout = 0,
            LogLevel = "string",
            MarkCacheSize = 0,
            MaxConcurrentQueries = 0,
            MaxConnections = 0,
            MaxPartitionSizeToDrop = 0,
            MaxTableSizeToDrop = 0,
            MergeTree = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigMergeTreeArgs
            {
                MaxBytesToMergeAtMinSpaceInPool = 0,
                MaxReplicatedMergesInQueue = 0,
                NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge = 0,
                PartsToDelayInsert = 0,
                PartsToThrowInsert = 0,
                ReplicatedDeduplicationWindow = 0,
                ReplicatedDeduplicationWindowSeconds = 0,
            },
            MetricLogEnabled = false,
            MetricLogRetentionSize = 0,
            MetricLogRetentionTime = 0,
            PartLogRetentionSize = 0,
            PartLogRetentionTime = 0,
            QueryLogRetentionSize = 0,
            QueryLogRetentionTime = 0,
            QueryThreadLogEnabled = false,
            QueryThreadLogRetentionSize = 0,
            QueryThreadLogRetentionTime = 0,
            Rabbitmq = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigRabbitmqArgs
            {
                Password = "string",
                Username = "string",
            },
            TextLogEnabled = false,
            TextLogLevel = "string",
            TextLogRetentionSize = 0,
            TextLogRetentionTime = 0,
            Timezone = "string",
            TraceLogEnabled = false,
            TraceLogRetentionSize = 0,
            TraceLogRetentionTime = 0,
            UncompressedCacheSize = 0,
        },
    },
    Databases = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
        {
            Name = "string",
        },
    },
    MlModels = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
        {
            Name = "string",
            Type = "string",
            Uri = "string",
        },
    },
    Access = new Yandex.Inputs.MdbClickhouseClusterAccessArgs
    {
        DataLens = false,
        Metrika = false,
        Serverless = false,
        WebSql = false,
    },
    DeletionProtection = false,
    Description = "string",
    CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
    {
        Enabled = false,
    },
    FolderId = "string",
    FormatSchemas = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
        {
            Name = "string",
            Type = "string",
            Uri = "string",
        },
    },
    BackupWindowStart = new Yandex.Inputs.MdbClickhouseClusterBackupWindowStartArgs
    {
        Hours = 0,
        Minutes = 0,
    },
    Labels = 
    {
        { "string", "string" },
    },
    MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
    {
        Type = "string",
        Day = "string",
        Hour = 0,
    },
    CopySchemaOnNewHosts = false,
    Name = "string",
    AdminPassword = "string",
    SecurityGroupIds = new[]
    {
        "string",
    },
    ServiceAccountId = "string",
    ShardGroups = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
        {
            Name = "string",
            ShardNames = new[]
            {
                "string",
            },
            Description = "string",
        },
    },
    SqlDatabaseManagement = false,
    SqlUserManagement = false,
    Users = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterUserArgs
        {
            Name = "string",
            Password = "string",
            Permissions = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                {
                    DatabaseName = "string",
                },
            },
            Quotas = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                {
                    IntervalDuration = 0,
                    Errors = 0,
                    ExecutionTime = 0,
                    Queries = 0,
                    ReadRows = 0,
                    ResultRows = 0,
                },
            },
            Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
            {
                AddHttpCorsHeader = false,
                AllowDdl = false,
                Compile = false,
                CompileExpressions = false,
                ConnectTimeout = 0,
                CountDistinctImplementation = "string",
                DistinctOverflowMode = "string",
                DistributedAggregationMemoryEfficient = false,
                DistributedDdlTaskTimeout = 0,
                DistributedProductMode = "string",
                EmptyResultForAggregationByEmptySet = false,
                EnableHttpCompression = false,
                FallbackToStaleReplicasForDistributedQueries = false,
                ForceIndexByDate = false,
                ForcePrimaryKey = false,
                GroupByOverflowMode = "string",
                GroupByTwoLevelThreshold = 0,
                GroupByTwoLevelThresholdBytes = 0,
                HttpConnectionTimeout = 0,
                HttpHeadersProgressInterval = 0,
                HttpReceiveTimeout = 0,
                HttpSendTimeout = 0,
                InputFormatDefaultsForOmittedFields = false,
                InputFormatValuesInterpretExpressions = false,
                InsertQuorum = 0,
                InsertQuorumTimeout = 0,
                JoinOverflowMode = "string",
                JoinUseNulls = false,
                JoinedSubqueryRequiresAlias = false,
                LowCardinalityAllowInNativeFormat = false,
                MaxAstDepth = 0,
                MaxAstElements = 0,
                MaxBlockSize = 0,
                MaxBytesBeforeExternalGroupBy = 0,
                MaxBytesBeforeExternalSort = 0,
                MaxBytesInDistinct = 0,
                MaxBytesInJoin = 0,
                MaxBytesInSet = 0,
                MaxBytesToRead = 0,
                MaxBytesToSort = 0,
                MaxBytesToTransfer = 0,
                MaxColumnsToRead = 0,
                MaxExecutionTime = 0,
                MaxExpandedAstElements = 0,
                MaxInsertBlockSize = 0,
                MaxMemoryUsage = 0,
                MaxMemoryUsageForUser = 0,
                MaxNetworkBandwidth = 0,
                MaxNetworkBandwidthForUser = 0,
                MaxQuerySize = 0,
                MaxReplicaDelayForDistributedQueries = 0,
                MaxResultBytes = 0,
                MaxResultRows = 0,
                MaxRowsInDistinct = 0,
                MaxRowsInJoin = 0,
                MaxRowsInSet = 0,
                MaxRowsToGroupBy = 0,
                MaxRowsToRead = 0,
                MaxRowsToSort = 0,
                MaxRowsToTransfer = 0,
                MaxTemporaryColumns = 0,
                MaxTemporaryNonConstColumns = 0,
                MaxThreads = 0,
                MergeTreeMaxBytesToUseCache = 0,
                MergeTreeMaxRowsToUseCache = 0,
                MergeTreeMinBytesForConcurrentRead = 0,
                MergeTreeMinRowsForConcurrentRead = 0,
                MinBytesToUseDirectIo = 0,
                MinCountToCompile = 0,
                MinCountToCompileExpression = 0,
                MinExecutionSpeed = 0,
                MinExecutionSpeedBytes = 0,
                MinInsertBlockSizeBytes = 0,
                MinInsertBlockSizeRows = 0,
                OutputFormatJsonQuote64bitIntegers = false,
                OutputFormatJsonQuoteDenormals = false,
                Priority = 0,
                QuotaMode = "string",
                ReadOverflowMode = "string",
                Readonly = 0,
                ReceiveTimeout = 0,
                ReplicationAlterPartitionsSync = 0,
                ResultOverflowMode = "string",
                SelectSequentialConsistency = false,
                SendProgressInHttpHeaders = false,
                SendTimeout = 0,
                SetOverflowMode = "string",
                SkipUnavailableShards = false,
                SortOverflowMode = "string",
                TimeoutOverflowMode = "string",
                TransferOverflowMode = "string",
                TransformNullIn = false,
                UseUncompressedCache = false,
            },
        },
    },
    Version = "string",
    Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
    {
        Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
        {
            DiskSize = 0,
            DiskTypeId = "string",
            ResourcePresetId = "string",
        },
    },
});
example, err := yandex.NewMdbClickhouseCluster(ctx, "mdbClickhouseClusterResource", &yandex.MdbClickhouseClusterArgs{
	Environment: pulumi.String("string"),
	NetworkId:   pulumi.String("string"),
	Hosts: yandex.MdbClickhouseClusterHostArray{
		&yandex.MdbClickhouseClusterHostArgs{
			Type:           pulumi.String("string"),
			Zone:           pulumi.String("string"),
			AssignPublicIp: pulumi.Bool(false),
			Fqdn:           pulumi.String("string"),
			ShardName:      pulumi.String("string"),
			SubnetId:       pulumi.String("string"),
		},
	},
	Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
		Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
			DiskSize:         pulumi.Int(0),
			DiskTypeId:       pulumi.String("string"),
			ResourcePresetId: pulumi.String("string"),
		},
		Config: &yandex.MdbClickhouseClusterClickhouseConfigArgs{
			BackgroundPoolSize:         pulumi.Int(0),
			BackgroundSchedulePoolSize: pulumi.Int(0),
			Compressions: yandex.MdbClickhouseClusterClickhouseConfigCompressionArray{
				&yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs{
					Method:           pulumi.String("string"),
					MinPartSize:      pulumi.Int(0),
					MinPartSizeRatio: pulumi.Float64(0),
				},
			},
			GeobaseUri: pulumi.String("string"),
			GraphiteRollups: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArray{
				&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs{
					Name: pulumi.String("string"),
					Patterns: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArray{
						&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs{
							Function: pulumi.String("string"),
							Regexp:   pulumi.String("string"),
							Retentions: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArray{
								&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs{
									Age:       pulumi.Int(0),
									Precision: pulumi.Int(0),
								},
							},
						},
					},
				},
			},
			Kafka: &yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs{
				SaslMechanism:    pulumi.String("string"),
				SaslPassword:     pulumi.String("string"),
				SaslUsername:     pulumi.String("string"),
				SecurityProtocol: pulumi.String("string"),
			},
			KafkaTopics: yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArray{
				&yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs{
					Name: pulumi.String("string"),
					Settings: &yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs{
						SaslMechanism:    pulumi.String("string"),
						SaslPassword:     pulumi.String("string"),
						SaslUsername:     pulumi.String("string"),
						SecurityProtocol: pulumi.String("string"),
					},
				},
			},
			KeepAliveTimeout:       pulumi.Int(0),
			LogLevel:               pulumi.String("string"),
			MarkCacheSize:          pulumi.Int(0),
			MaxConcurrentQueries:   pulumi.Int(0),
			MaxConnections:         pulumi.Int(0),
			MaxPartitionSizeToDrop: pulumi.Int(0),
			MaxTableSizeToDrop:     pulumi.Int(0),
			MergeTree: &yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs{
				MaxBytesToMergeAtMinSpaceInPool:                pulumi.Int(0),
				MaxReplicatedMergesInQueue:                     pulumi.Int(0),
				NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: pulumi.Int(0),
				PartsToDelayInsert:                             pulumi.Int(0),
				PartsToThrowInsert:                             pulumi.Int(0),
				ReplicatedDeduplicationWindow:                  pulumi.Int(0),
				ReplicatedDeduplicationWindowSeconds:           pulumi.Int(0),
			},
			MetricLogEnabled:            pulumi.Bool(false),
			MetricLogRetentionSize:      pulumi.Int(0),
			MetricLogRetentionTime:      pulumi.Int(0),
			PartLogRetentionSize:        pulumi.Int(0),
			PartLogRetentionTime:        pulumi.Int(0),
			QueryLogRetentionSize:       pulumi.Int(0),
			QueryLogRetentionTime:       pulumi.Int(0),
			QueryThreadLogEnabled:       pulumi.Bool(false),
			QueryThreadLogRetentionSize: pulumi.Int(0),
			QueryThreadLogRetentionTime: pulumi.Int(0),
			Rabbitmq: &yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs{
				Password: pulumi.String("string"),
				Username: pulumi.String("string"),
			},
			TextLogEnabled:        pulumi.Bool(false),
			TextLogLevel:          pulumi.String("string"),
			TextLogRetentionSize:  pulumi.Int(0),
			TextLogRetentionTime:  pulumi.Int(0),
			Timezone:              pulumi.String("string"),
			TraceLogEnabled:       pulumi.Bool(false),
			TraceLogRetentionSize: pulumi.Int(0),
			TraceLogRetentionTime: pulumi.Int(0),
			UncompressedCacheSize: pulumi.Int(0),
		},
	},
	Databases: yandex.MdbClickhouseClusterDatabaseArray{
		&yandex.MdbClickhouseClusterDatabaseArgs{
			Name: pulumi.String("string"),
		},
	},
	MlModels: yandex.MdbClickhouseClusterMlModelArray{
		&yandex.MdbClickhouseClusterMlModelArgs{
			Name: pulumi.String("string"),
			Type: pulumi.String("string"),
			Uri:  pulumi.String("string"),
		},
	},
	Access: &yandex.MdbClickhouseClusterAccessArgs{
		DataLens:   pulumi.Bool(false),
		Metrika:    pulumi.Bool(false),
		Serverless: pulumi.Bool(false),
		WebSql:     pulumi.Bool(false),
	},
	DeletionProtection: pulumi.Bool(false),
	Description:        pulumi.String("string"),
	CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
		Enabled: pulumi.Bool(false),
	},
	FolderId: pulumi.String("string"),
	FormatSchemas: yandex.MdbClickhouseClusterFormatSchemaArray{
		&yandex.MdbClickhouseClusterFormatSchemaArgs{
			Name: pulumi.String("string"),
			Type: pulumi.String("string"),
			Uri:  pulumi.String("string"),
		},
	},
	BackupWindowStart: &yandex.MdbClickhouseClusterBackupWindowStartArgs{
		Hours:   pulumi.Int(0),
		Minutes: pulumi.Int(0),
	},
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	MaintenanceWindow: &yandex.MdbClickhouseClusterMaintenanceWindowArgs{
		Type: pulumi.String("string"),
		Day:  pulumi.String("string"),
		Hour: pulumi.Int(0),
	},
	CopySchemaOnNewHosts: pulumi.Bool(false),
	Name:                 pulumi.String("string"),
	AdminPassword:        pulumi.String("string"),
	SecurityGroupIds: pulumi.StringArray{
		pulumi.String("string"),
	},
	ServiceAccountId: pulumi.String("string"),
	ShardGroups: yandex.MdbClickhouseClusterShardGroupArray{
		&yandex.MdbClickhouseClusterShardGroupArgs{
			Name: pulumi.String("string"),
			ShardNames: pulumi.StringArray{
				pulumi.String("string"),
			},
			Description: pulumi.String("string"),
		},
	},
	SqlDatabaseManagement: pulumi.Bool(false),
	SqlUserManagement:     pulumi.Bool(false),
	Users: yandex.MdbClickhouseClusterUserArray{
		&yandex.MdbClickhouseClusterUserArgs{
			Name:     pulumi.String("string"),
			Password: pulumi.String("string"),
			Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
				&yandex.MdbClickhouseClusterUserPermissionArgs{
					DatabaseName: pulumi.String("string"),
				},
			},
			Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
				&yandex.MdbClickhouseClusterUserQuotaArgs{
					IntervalDuration: pulumi.Int(0),
					Errors:           pulumi.Int(0),
					ExecutionTime:    pulumi.Int(0),
					Queries:          pulumi.Int(0),
					ReadRows:         pulumi.Int(0),
					ResultRows:       pulumi.Int(0),
				},
			},
			Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
				AddHttpCorsHeader:                            pulumi.Bool(false),
				AllowDdl:                                     pulumi.Bool(false),
				Compile:                                      pulumi.Bool(false),
				CompileExpressions:                           pulumi.Bool(false),
				ConnectTimeout:                               pulumi.Int(0),
				CountDistinctImplementation:                  pulumi.String("string"),
				DistinctOverflowMode:                         pulumi.String("string"),
				DistributedAggregationMemoryEfficient:        pulumi.Bool(false),
				DistributedDdlTaskTimeout:                    pulumi.Int(0),
				DistributedProductMode:                       pulumi.String("string"),
				EmptyResultForAggregationByEmptySet:          pulumi.Bool(false),
				EnableHttpCompression:                        pulumi.Bool(false),
				FallbackToStaleReplicasForDistributedQueries: pulumi.Bool(false),
				ForceIndexByDate:                             pulumi.Bool(false),
				ForcePrimaryKey:                              pulumi.Bool(false),
				GroupByOverflowMode:                          pulumi.String("string"),
				GroupByTwoLevelThreshold:                     pulumi.Int(0),
				GroupByTwoLevelThresholdBytes:                pulumi.Int(0),
				HttpConnectionTimeout:                        pulumi.Int(0),
				HttpHeadersProgressInterval:                  pulumi.Int(0),
				HttpReceiveTimeout:                           pulumi.Int(0),
				HttpSendTimeout:                              pulumi.Int(0),
				InputFormatDefaultsForOmittedFields:          pulumi.Bool(false),
				InputFormatValuesInterpretExpressions:        pulumi.Bool(false),
				InsertQuorum:                                 pulumi.Int(0),
				InsertQuorumTimeout:                          pulumi.Int(0),
				JoinOverflowMode:                             pulumi.String("string"),
				JoinUseNulls:                                 pulumi.Bool(false),
				JoinedSubqueryRequiresAlias:                  pulumi.Bool(false),
				LowCardinalityAllowInNativeFormat:            pulumi.Bool(false),
				MaxAstDepth:                                  pulumi.Int(0),
				MaxAstElements:                               pulumi.Int(0),
				MaxBlockSize:                                 pulumi.Int(0),
				MaxBytesBeforeExternalGroupBy:                pulumi.Int(0),
				MaxBytesBeforeExternalSort:                   pulumi.Int(0),
				MaxBytesInDistinct:                           pulumi.Int(0),
				MaxBytesInJoin:                               pulumi.Int(0),
				MaxBytesInSet:                                pulumi.Int(0),
				MaxBytesToRead:                               pulumi.Int(0),
				MaxBytesToSort:                               pulumi.Int(0),
				MaxBytesToTransfer:                           pulumi.Int(0),
				MaxColumnsToRead:                             pulumi.Int(0),
				MaxExecutionTime:                             pulumi.Int(0),
				MaxExpandedAstElements:                       pulumi.Int(0),
				MaxInsertBlockSize:                           pulumi.Int(0),
				MaxMemoryUsage:                               pulumi.Int(0),
				MaxMemoryUsageForUser:                        pulumi.Int(0),
				MaxNetworkBandwidth:                          pulumi.Int(0),
				MaxNetworkBandwidthForUser:                   pulumi.Int(0),
				MaxQuerySize:                                 pulumi.Int(0),
				MaxReplicaDelayForDistributedQueries:         pulumi.Int(0),
				MaxResultBytes:                               pulumi.Int(0),
				MaxResultRows:                                pulumi.Int(0),
				MaxRowsInDistinct:                            pulumi.Int(0),
				MaxRowsInJoin:                                pulumi.Int(0),
				MaxRowsInSet:                                 pulumi.Int(0),
				MaxRowsToGroupBy:                             pulumi.Int(0),
				MaxRowsToRead:                                pulumi.Int(0),
				MaxRowsToSort:                                pulumi.Int(0),
				MaxRowsToTransfer:                            pulumi.Int(0),
				MaxTemporaryColumns:                          pulumi.Int(0),
				MaxTemporaryNonConstColumns:                  pulumi.Int(0),
				MaxThreads:                                   pulumi.Int(0),
				MergeTreeMaxBytesToUseCache:                  pulumi.Int(0),
				MergeTreeMaxRowsToUseCache:                   pulumi.Int(0),
				MergeTreeMinBytesForConcurrentRead:           pulumi.Int(0),
				MergeTreeMinRowsForConcurrentRead:            pulumi.Int(0),
				MinBytesToUseDirectIo:                        pulumi.Int(0),
				MinCountToCompile:                            pulumi.Int(0),
				MinCountToCompileExpression:                  pulumi.Int(0),
				MinExecutionSpeed:                            pulumi.Int(0),
				MinExecutionSpeedBytes:                       pulumi.Int(0),
				MinInsertBlockSizeBytes:                      pulumi.Int(0),
				MinInsertBlockSizeRows:                       pulumi.Int(0),
				OutputFormatJsonQuote64bitIntegers:           pulumi.Bool(false),
				OutputFormatJsonQuoteDenormals:               pulumi.Bool(false),
				Priority:                                     pulumi.Int(0),
				QuotaMode:                                    pulumi.String("string"),
				ReadOverflowMode:                             pulumi.String("string"),
				Readonly:                                     pulumi.Int(0),
				ReceiveTimeout:                               pulumi.Int(0),
				ReplicationAlterPartitionsSync:               pulumi.Int(0),
				ResultOverflowMode:                           pulumi.String("string"),
				SelectSequentialConsistency:                  pulumi.Bool(false),
				SendProgressInHttpHeaders:                    pulumi.Bool(false),
				SendTimeout:                                  pulumi.Int(0),
				SetOverflowMode:                              pulumi.String("string"),
				SkipUnavailableShards:                        pulumi.Bool(false),
				SortOverflowMode:                             pulumi.String("string"),
				TimeoutOverflowMode:                          pulumi.String("string"),
				TransferOverflowMode:                         pulumi.String("string"),
				TransformNullIn:                              pulumi.Bool(false),
				UseUncompressedCache:                         pulumi.Bool(false),
			},
		},
	},
	Version: pulumi.String("string"),
	Zookeeper: &yandex.MdbClickhouseClusterZookeeperArgs{
		Resources: &yandex.MdbClickhouseClusterZookeeperResourcesArgs{
			DiskSize:         pulumi.Int(0),
			DiskTypeId:       pulumi.String("string"),
			ResourcePresetId: pulumi.String("string"),
		},
	},
})
var mdbClickhouseClusterResource = new MdbClickhouseCluster("mdbClickhouseClusterResource", MdbClickhouseClusterArgs.builder()
    .environment("string")
    .networkId("string")
    .hosts(MdbClickhouseClusterHostArgs.builder()
        .type("string")
        .zone("string")
        .assignPublicIp(false)
        .fqdn("string")
        .shardName("string")
        .subnetId("string")
        .build())
    .clickhouse(MdbClickhouseClusterClickhouseArgs.builder()
        .resources(MdbClickhouseClusterClickhouseResourcesArgs.builder()
            .diskSize(0)
            .diskTypeId("string")
            .resourcePresetId("string")
            .build())
        .config(MdbClickhouseClusterClickhouseConfigArgs.builder()
            .backgroundPoolSize(0)
            .backgroundSchedulePoolSize(0)
            .compressions(MdbClickhouseClusterClickhouseConfigCompressionArgs.builder()
                .method("string")
                .minPartSize(0)
                .minPartSizeRatio(0)
                .build())
            .geobaseUri("string")
            .graphiteRollups(MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs.builder()
                .name("string")
                .patterns(MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs.builder()
                    .function("string")
                    .regexp("string")
                    .retentions(MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs.builder()
                        .age(0)
                        .precision(0)
                        .build())
                    .build())
                .build())
            .kafka(MdbClickhouseClusterClickhouseConfigKafkaArgs.builder()
                .saslMechanism("string")
                .saslPassword("string")
                .saslUsername("string")
                .securityProtocol("string")
                .build())
            .kafkaTopics(MdbClickhouseClusterClickhouseConfigKafkaTopicArgs.builder()
                .name("string")
                .settings(MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs.builder()
                    .saslMechanism("string")
                    .saslPassword("string")
                    .saslUsername("string")
                    .securityProtocol("string")
                    .build())
                .build())
            .keepAliveTimeout(0)
            .logLevel("string")
            .markCacheSize(0)
            .maxConcurrentQueries(0)
            .maxConnections(0)
            .maxPartitionSizeToDrop(0)
            .maxTableSizeToDrop(0)
            .mergeTree(MdbClickhouseClusterClickhouseConfigMergeTreeArgs.builder()
                .maxBytesToMergeAtMinSpaceInPool(0)
                .maxReplicatedMergesInQueue(0)
                .numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge(0)
                .partsToDelayInsert(0)
                .partsToThrowInsert(0)
                .replicatedDeduplicationWindow(0)
                .replicatedDeduplicationWindowSeconds(0)
                .build())
            .metricLogEnabled(false)
            .metricLogRetentionSize(0)
            .metricLogRetentionTime(0)
            .partLogRetentionSize(0)
            .partLogRetentionTime(0)
            .queryLogRetentionSize(0)
            .queryLogRetentionTime(0)
            .queryThreadLogEnabled(false)
            .queryThreadLogRetentionSize(0)
            .queryThreadLogRetentionTime(0)
            .rabbitmq(MdbClickhouseClusterClickhouseConfigRabbitmqArgs.builder()
                .password("string")
                .username("string")
                .build())
            .textLogEnabled(false)
            .textLogLevel("string")
            .textLogRetentionSize(0)
            .textLogRetentionTime(0)
            .timezone("string")
            .traceLogEnabled(false)
            .traceLogRetentionSize(0)
            .traceLogRetentionTime(0)
            .uncompressedCacheSize(0)
            .build())
        .build())
    .databases(MdbClickhouseClusterDatabaseArgs.builder()
        .name("string")
        .build())
    .mlModels(MdbClickhouseClusterMlModelArgs.builder()
        .name("string")
        .type("string")
        .uri("string")
        .build())
    .access(MdbClickhouseClusterAccessArgs.builder()
        .dataLens(false)
        .metrika(false)
        .serverless(false)
        .webSql(false)
        .build())
    .deletionProtection(false)
    .description("string")
    .cloudStorage(MdbClickhouseClusterCloudStorageArgs.builder()
        .enabled(false)
        .build())
    .folderId("string")
    .formatSchemas(MdbClickhouseClusterFormatSchemaArgs.builder()
        .name("string")
        .type("string")
        .uri("string")
        .build())
    .backupWindowStart(MdbClickhouseClusterBackupWindowStartArgs.builder()
        .hours(0)
        .minutes(0)
        .build())
    .labels(Map.of("string", "string"))
    .maintenanceWindow(MdbClickhouseClusterMaintenanceWindowArgs.builder()
        .type("string")
        .day("string")
        .hour(0)
        .build())
    .copySchemaOnNewHosts(false)
    .name("string")
    .adminPassword("string")
    .securityGroupIds("string")
    .serviceAccountId("string")
    .shardGroups(MdbClickhouseClusterShardGroupArgs.builder()
        .name("string")
        .shardNames("string")
        .description("string")
        .build())
    .sqlDatabaseManagement(false)
    .sqlUserManagement(false)
    .users(MdbClickhouseClusterUserArgs.builder()
        .name("string")
        .password("string")
        .permissions(MdbClickhouseClusterUserPermissionArgs.builder()
            .databaseName("string")
            .build())
        .quotas(MdbClickhouseClusterUserQuotaArgs.builder()
            .intervalDuration(0)
            .errors(0)
            .executionTime(0)
            .queries(0)
            .readRows(0)
            .resultRows(0)
            .build())
        .settings(MdbClickhouseClusterUserSettingsArgs.builder()
            .addHttpCorsHeader(false)
            .allowDdl(false)
            .compile(false)
            .compileExpressions(false)
            .connectTimeout(0)
            .countDistinctImplementation("string")
            .distinctOverflowMode("string")
            .distributedAggregationMemoryEfficient(false)
            .distributedDdlTaskTimeout(0)
            .distributedProductMode("string")
            .emptyResultForAggregationByEmptySet(false)
            .enableHttpCompression(false)
            .fallbackToStaleReplicasForDistributedQueries(false)
            .forceIndexByDate(false)
            .forcePrimaryKey(false)
            .groupByOverflowMode("string")
            .groupByTwoLevelThreshold(0)
            .groupByTwoLevelThresholdBytes(0)
            .httpConnectionTimeout(0)
            .httpHeadersProgressInterval(0)
            .httpReceiveTimeout(0)
            .httpSendTimeout(0)
            .inputFormatDefaultsForOmittedFields(false)
            .inputFormatValuesInterpretExpressions(false)
            .insertQuorum(0)
            .insertQuorumTimeout(0)
            .joinOverflowMode("string")
            .joinUseNulls(false)
            .joinedSubqueryRequiresAlias(false)
            .lowCardinalityAllowInNativeFormat(false)
            .maxAstDepth(0)
            .maxAstElements(0)
            .maxBlockSize(0)
            .maxBytesBeforeExternalGroupBy(0)
            .maxBytesBeforeExternalSort(0)
            .maxBytesInDistinct(0)
            .maxBytesInJoin(0)
            .maxBytesInSet(0)
            .maxBytesToRead(0)
            .maxBytesToSort(0)
            .maxBytesToTransfer(0)
            .maxColumnsToRead(0)
            .maxExecutionTime(0)
            .maxExpandedAstElements(0)
            .maxInsertBlockSize(0)
            .maxMemoryUsage(0)
            .maxMemoryUsageForUser(0)
            .maxNetworkBandwidth(0)
            .maxNetworkBandwidthForUser(0)
            .maxQuerySize(0)
            .maxReplicaDelayForDistributedQueries(0)
            .maxResultBytes(0)
            .maxResultRows(0)
            .maxRowsInDistinct(0)
            .maxRowsInJoin(0)
            .maxRowsInSet(0)
            .maxRowsToGroupBy(0)
            .maxRowsToRead(0)
            .maxRowsToSort(0)
            .maxRowsToTransfer(0)
            .maxTemporaryColumns(0)
            .maxTemporaryNonConstColumns(0)
            .maxThreads(0)
            .mergeTreeMaxBytesToUseCache(0)
            .mergeTreeMaxRowsToUseCache(0)
            .mergeTreeMinBytesForConcurrentRead(0)
            .mergeTreeMinRowsForConcurrentRead(0)
            .minBytesToUseDirectIo(0)
            .minCountToCompile(0)
            .minCountToCompileExpression(0)
            .minExecutionSpeed(0)
            .minExecutionSpeedBytes(0)
            .minInsertBlockSizeBytes(0)
            .minInsertBlockSizeRows(0)
            .outputFormatJsonQuote64bitIntegers(false)
            .outputFormatJsonQuoteDenormals(false)
            .priority(0)
            .quotaMode("string")
            .readOverflowMode("string")
            .readonly(0)
            .receiveTimeout(0)
            .replicationAlterPartitionsSync(0)
            .resultOverflowMode("string")
            .selectSequentialConsistency(false)
            .sendProgressInHttpHeaders(false)
            .sendTimeout(0)
            .setOverflowMode("string")
            .skipUnavailableShards(false)
            .sortOverflowMode("string")
            .timeoutOverflowMode("string")
            .transferOverflowMode("string")
            .transformNullIn(false)
            .useUncompressedCache(false)
            .build())
        .build())
    .version("string")
    .zookeeper(MdbClickhouseClusterZookeeperArgs.builder()
        .resources(MdbClickhouseClusterZookeeperResourcesArgs.builder()
            .diskSize(0)
            .diskTypeId("string")
            .resourcePresetId("string")
            .build())
        .build())
    .build());
mdb_clickhouse_cluster_resource = yandex.MdbClickhouseCluster("mdbClickhouseClusterResource",
    environment="string",
    network_id="string",
    hosts=[{
        "type": "string",
        "zone": "string",
        "assign_public_ip": False,
        "fqdn": "string",
        "shard_name": "string",
        "subnet_id": "string",
    }],
    clickhouse={
        "resources": {
            "disk_size": 0,
            "disk_type_id": "string",
            "resource_preset_id": "string",
        },
        "config": {
            "background_pool_size": 0,
            "background_schedule_pool_size": 0,
            "compressions": [{
                "method": "string",
                "min_part_size": 0,
                "min_part_size_ratio": 0,
            }],
            "geobase_uri": "string",
            "graphite_rollups": [{
                "name": "string",
                "patterns": [{
                    "function": "string",
                    "regexp": "string",
                    "retentions": [{
                        "age": 0,
                        "precision": 0,
                    }],
                }],
            }],
            "kafka": {
                "sasl_mechanism": "string",
                "sasl_password": "string",
                "sasl_username": "string",
                "security_protocol": "string",
            },
            "kafka_topics": [{
                "name": "string",
                "settings": {
                    "sasl_mechanism": "string",
                    "sasl_password": "string",
                    "sasl_username": "string",
                    "security_protocol": "string",
                },
            }],
            "keep_alive_timeout": 0,
            "log_level": "string",
            "mark_cache_size": 0,
            "max_concurrent_queries": 0,
            "max_connections": 0,
            "max_partition_size_to_drop": 0,
            "max_table_size_to_drop": 0,
            "merge_tree": {
                "max_bytes_to_merge_at_min_space_in_pool": 0,
                "max_replicated_merges_in_queue": 0,
                "number_of_free_entries_in_pool_to_lower_max_size_of_merge": 0,
                "parts_to_delay_insert": 0,
                "parts_to_throw_insert": 0,
                "replicated_deduplication_window": 0,
                "replicated_deduplication_window_seconds": 0,
            },
            "metric_log_enabled": False,
            "metric_log_retention_size": 0,
            "metric_log_retention_time": 0,
            "part_log_retention_size": 0,
            "part_log_retention_time": 0,
            "query_log_retention_size": 0,
            "query_log_retention_time": 0,
            "query_thread_log_enabled": False,
            "query_thread_log_retention_size": 0,
            "query_thread_log_retention_time": 0,
            "rabbitmq": {
                "password": "string",
                "username": "string",
            },
            "text_log_enabled": False,
            "text_log_level": "string",
            "text_log_retention_size": 0,
            "text_log_retention_time": 0,
            "timezone": "string",
            "trace_log_enabled": False,
            "trace_log_retention_size": 0,
            "trace_log_retention_time": 0,
            "uncompressed_cache_size": 0,
        },
    },
    databases=[{
        "name": "string",
    }],
    ml_models=[{
        "name": "string",
        "type": "string",
        "uri": "string",
    }],
    access={
        "data_lens": False,
        "metrika": False,
        "serverless": False,
        "web_sql": False,
    },
    deletion_protection=False,
    description="string",
    cloud_storage={
        "enabled": False,
    },
    folder_id="string",
    format_schemas=[{
        "name": "string",
        "type": "string",
        "uri": "string",
    }],
    backup_window_start={
        "hours": 0,
        "minutes": 0,
    },
    labels={
        "string": "string",
    },
    maintenance_window={
        "type": "string",
        "day": "string",
        "hour": 0,
    },
    copy_schema_on_new_hosts=False,
    name="string",
    admin_password="string",
    security_group_ids=["string"],
    service_account_id="string",
    shard_groups=[{
        "name": "string",
        "shard_names": ["string"],
        "description": "string",
    }],
    sql_database_management=False,
    sql_user_management=False,
    users=[{
        "name": "string",
        "password": "string",
        "permissions": [{
            "database_name": "string",
        }],
        "quotas": [{
            "interval_duration": 0,
            "errors": 0,
            "execution_time": 0,
            "queries": 0,
            "read_rows": 0,
            "result_rows": 0,
        }],
        "settings": {
            "add_http_cors_header": False,
            "allow_ddl": False,
            "compile": False,
            "compile_expressions": False,
            "connect_timeout": 0,
            "count_distinct_implementation": "string",
            "distinct_overflow_mode": "string",
            "distributed_aggregation_memory_efficient": False,
            "distributed_ddl_task_timeout": 0,
            "distributed_product_mode": "string",
            "empty_result_for_aggregation_by_empty_set": False,
            "enable_http_compression": False,
            "fallback_to_stale_replicas_for_distributed_queries": False,
            "force_index_by_date": False,
            "force_primary_key": False,
            "group_by_overflow_mode": "string",
            "group_by_two_level_threshold": 0,
            "group_by_two_level_threshold_bytes": 0,
            "http_connection_timeout": 0,
            "http_headers_progress_interval": 0,
            "http_receive_timeout": 0,
            "http_send_timeout": 0,
            "input_format_defaults_for_omitted_fields": False,
            "input_format_values_interpret_expressions": False,
            "insert_quorum": 0,
            "insert_quorum_timeout": 0,
            "join_overflow_mode": "string",
            "join_use_nulls": False,
            "joined_subquery_requires_alias": False,
            "low_cardinality_allow_in_native_format": False,
            "max_ast_depth": 0,
            "max_ast_elements": 0,
            "max_block_size": 0,
            "max_bytes_before_external_group_by": 0,
            "max_bytes_before_external_sort": 0,
            "max_bytes_in_distinct": 0,
            "max_bytes_in_join": 0,
            "max_bytes_in_set": 0,
            "max_bytes_to_read": 0,
            "max_bytes_to_sort": 0,
            "max_bytes_to_transfer": 0,
            "max_columns_to_read": 0,
            "max_execution_time": 0,
            "max_expanded_ast_elements": 0,
            "max_insert_block_size": 0,
            "max_memory_usage": 0,
            "max_memory_usage_for_user": 0,
            "max_network_bandwidth": 0,
            "max_network_bandwidth_for_user": 0,
            "max_query_size": 0,
            "max_replica_delay_for_distributed_queries": 0,
            "max_result_bytes": 0,
            "max_result_rows": 0,
            "max_rows_in_distinct": 0,
            "max_rows_in_join": 0,
            "max_rows_in_set": 0,
            "max_rows_to_group_by": 0,
            "max_rows_to_read": 0,
            "max_rows_to_sort": 0,
            "max_rows_to_transfer": 0,
            "max_temporary_columns": 0,
            "max_temporary_non_const_columns": 0,
            "max_threads": 0,
            "merge_tree_max_bytes_to_use_cache": 0,
            "merge_tree_max_rows_to_use_cache": 0,
            "merge_tree_min_bytes_for_concurrent_read": 0,
            "merge_tree_min_rows_for_concurrent_read": 0,
            "min_bytes_to_use_direct_io": 0,
            "min_count_to_compile": 0,
            "min_count_to_compile_expression": 0,
            "min_execution_speed": 0,
            "min_execution_speed_bytes": 0,
            "min_insert_block_size_bytes": 0,
            "min_insert_block_size_rows": 0,
            "output_format_json_quote64bit_integers": False,
            "output_format_json_quote_denormals": False,
            "priority": 0,
            "quota_mode": "string",
            "read_overflow_mode": "string",
            "readonly": 0,
            "receive_timeout": 0,
            "replication_alter_partitions_sync": 0,
            "result_overflow_mode": "string",
            "select_sequential_consistency": False,
            "send_progress_in_http_headers": False,
            "send_timeout": 0,
            "set_overflow_mode": "string",
            "skip_unavailable_shards": False,
            "sort_overflow_mode": "string",
            "timeout_overflow_mode": "string",
            "transfer_overflow_mode": "string",
            "transform_null_in": False,
            "use_uncompressed_cache": False,
        },
    }],
    version="string",
    zookeeper={
        "resources": {
            "disk_size": 0,
            "disk_type_id": "string",
            "resource_preset_id": "string",
        },
    })
const mdbClickhouseClusterResource = new yandex.MdbClickhouseCluster("mdbClickhouseClusterResource", {
    environment: "string",
    networkId: "string",
    hosts: [{
        type: "string",
        zone: "string",
        assignPublicIp: false,
        fqdn: "string",
        shardName: "string",
        subnetId: "string",
    }],
    clickhouse: {
        resources: {
            diskSize: 0,
            diskTypeId: "string",
            resourcePresetId: "string",
        },
        config: {
            backgroundPoolSize: 0,
            backgroundSchedulePoolSize: 0,
            compressions: [{
                method: "string",
                minPartSize: 0,
                minPartSizeRatio: 0,
            }],
            geobaseUri: "string",
            graphiteRollups: [{
                name: "string",
                patterns: [{
                    "function": "string",
                    regexp: "string",
                    retentions: [{
                        age: 0,
                        precision: 0,
                    }],
                }],
            }],
            kafka: {
                saslMechanism: "string",
                saslPassword: "string",
                saslUsername: "string",
                securityProtocol: "string",
            },
            kafkaTopics: [{
                name: "string",
                settings: {
                    saslMechanism: "string",
                    saslPassword: "string",
                    saslUsername: "string",
                    securityProtocol: "string",
                },
            }],
            keepAliveTimeout: 0,
            logLevel: "string",
            markCacheSize: 0,
            maxConcurrentQueries: 0,
            maxConnections: 0,
            maxPartitionSizeToDrop: 0,
            maxTableSizeToDrop: 0,
            mergeTree: {
                maxBytesToMergeAtMinSpaceInPool: 0,
                maxReplicatedMergesInQueue: 0,
                numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 0,
                partsToDelayInsert: 0,
                partsToThrowInsert: 0,
                replicatedDeduplicationWindow: 0,
                replicatedDeduplicationWindowSeconds: 0,
            },
            metricLogEnabled: false,
            metricLogRetentionSize: 0,
            metricLogRetentionTime: 0,
            partLogRetentionSize: 0,
            partLogRetentionTime: 0,
            queryLogRetentionSize: 0,
            queryLogRetentionTime: 0,
            queryThreadLogEnabled: false,
            queryThreadLogRetentionSize: 0,
            queryThreadLogRetentionTime: 0,
            rabbitmq: {
                password: "string",
                username: "string",
            },
            textLogEnabled: false,
            textLogLevel: "string",
            textLogRetentionSize: 0,
            textLogRetentionTime: 0,
            timezone: "string",
            traceLogEnabled: false,
            traceLogRetentionSize: 0,
            traceLogRetentionTime: 0,
            uncompressedCacheSize: 0,
        },
    },
    databases: [{
        name: "string",
    }],
    mlModels: [{
        name: "string",
        type: "string",
        uri: "string",
    }],
    access: {
        dataLens: false,
        metrika: false,
        serverless: false,
        webSql: false,
    },
    deletionProtection: false,
    description: "string",
    cloudStorage: {
        enabled: false,
    },
    folderId: "string",
    formatSchemas: [{
        name: "string",
        type: "string",
        uri: "string",
    }],
    backupWindowStart: {
        hours: 0,
        minutes: 0,
    },
    labels: {
        string: "string",
    },
    maintenanceWindow: {
        type: "string",
        day: "string",
        hour: 0,
    },
    copySchemaOnNewHosts: false,
    name: "string",
    adminPassword: "string",
    securityGroupIds: ["string"],
    serviceAccountId: "string",
    shardGroups: [{
        name: "string",
        shardNames: ["string"],
        description: "string",
    }],
    sqlDatabaseManagement: false,
    sqlUserManagement: false,
    users: [{
        name: "string",
        password: "string",
        permissions: [{
            databaseName: "string",
        }],
        quotas: [{
            intervalDuration: 0,
            errors: 0,
            executionTime: 0,
            queries: 0,
            readRows: 0,
            resultRows: 0,
        }],
        settings: {
            addHttpCorsHeader: false,
            allowDdl: false,
            compile: false,
            compileExpressions: false,
            connectTimeout: 0,
            countDistinctImplementation: "string",
            distinctOverflowMode: "string",
            distributedAggregationMemoryEfficient: false,
            distributedDdlTaskTimeout: 0,
            distributedProductMode: "string",
            emptyResultForAggregationByEmptySet: false,
            enableHttpCompression: false,
            fallbackToStaleReplicasForDistributedQueries: false,
            forceIndexByDate: false,
            forcePrimaryKey: false,
            groupByOverflowMode: "string",
            groupByTwoLevelThreshold: 0,
            groupByTwoLevelThresholdBytes: 0,
            httpConnectionTimeout: 0,
            httpHeadersProgressInterval: 0,
            httpReceiveTimeout: 0,
            httpSendTimeout: 0,
            inputFormatDefaultsForOmittedFields: false,
            inputFormatValuesInterpretExpressions: false,
            insertQuorum: 0,
            insertQuorumTimeout: 0,
            joinOverflowMode: "string",
            joinUseNulls: false,
            joinedSubqueryRequiresAlias: false,
            lowCardinalityAllowInNativeFormat: false,
            maxAstDepth: 0,
            maxAstElements: 0,
            maxBlockSize: 0,
            maxBytesBeforeExternalGroupBy: 0,
            maxBytesBeforeExternalSort: 0,
            maxBytesInDistinct: 0,
            maxBytesInJoin: 0,
            maxBytesInSet: 0,
            maxBytesToRead: 0,
            maxBytesToSort: 0,
            maxBytesToTransfer: 0,
            maxColumnsToRead: 0,
            maxExecutionTime: 0,
            maxExpandedAstElements: 0,
            maxInsertBlockSize: 0,
            maxMemoryUsage: 0,
            maxMemoryUsageForUser: 0,
            maxNetworkBandwidth: 0,
            maxNetworkBandwidthForUser: 0,
            maxQuerySize: 0,
            maxReplicaDelayForDistributedQueries: 0,
            maxResultBytes: 0,
            maxResultRows: 0,
            maxRowsInDistinct: 0,
            maxRowsInJoin: 0,
            maxRowsInSet: 0,
            maxRowsToGroupBy: 0,
            maxRowsToRead: 0,
            maxRowsToSort: 0,
            maxRowsToTransfer: 0,
            maxTemporaryColumns: 0,
            maxTemporaryNonConstColumns: 0,
            maxThreads: 0,
            mergeTreeMaxBytesToUseCache: 0,
            mergeTreeMaxRowsToUseCache: 0,
            mergeTreeMinBytesForConcurrentRead: 0,
            mergeTreeMinRowsForConcurrentRead: 0,
            minBytesToUseDirectIo: 0,
            minCountToCompile: 0,
            minCountToCompileExpression: 0,
            minExecutionSpeed: 0,
            minExecutionSpeedBytes: 0,
            minInsertBlockSizeBytes: 0,
            minInsertBlockSizeRows: 0,
            outputFormatJsonQuote64bitIntegers: false,
            outputFormatJsonQuoteDenormals: false,
            priority: 0,
            quotaMode: "string",
            readOverflowMode: "string",
            readonly: 0,
            receiveTimeout: 0,
            replicationAlterPartitionsSync: 0,
            resultOverflowMode: "string",
            selectSequentialConsistency: false,
            sendProgressInHttpHeaders: false,
            sendTimeout: 0,
            setOverflowMode: "string",
            skipUnavailableShards: false,
            sortOverflowMode: "string",
            timeoutOverflowMode: "string",
            transferOverflowMode: "string",
            transformNullIn: false,
            useUncompressedCache: false,
        },
    }],
    version: "string",
    zookeeper: {
        resources: {
            diskSize: 0,
            diskTypeId: "string",
            resourcePresetId: "string",
        },
    },
});
type: yandex:MdbClickhouseCluster
properties:
    access:
        dataLens: false
        metrika: false
        serverless: false
        webSql: false
    adminPassword: string
    backupWindowStart:
        hours: 0
        minutes: 0
    clickhouse:
        config:
            backgroundPoolSize: 0
            backgroundSchedulePoolSize: 0
            compressions:
                - method: string
                  minPartSize: 0
                  minPartSizeRatio: 0
            geobaseUri: string
            graphiteRollups:
                - name: string
                  patterns:
                    - function: string
                      regexp: string
                      retentions:
                        - age: 0
                          precision: 0
            kafka:
                saslMechanism: string
                saslPassword: string
                saslUsername: string
                securityProtocol: string
            kafkaTopics:
                - name: string
                  settings:
                    saslMechanism: string
                    saslPassword: string
                    saslUsername: string
                    securityProtocol: string
            keepAliveTimeout: 0
            logLevel: string
            markCacheSize: 0
            maxConcurrentQueries: 0
            maxConnections: 0
            maxPartitionSizeToDrop: 0
            maxTableSizeToDrop: 0
            mergeTree:
                maxBytesToMergeAtMinSpaceInPool: 0
                maxReplicatedMergesInQueue: 0
                numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 0
                partsToDelayInsert: 0
                partsToThrowInsert: 0
                replicatedDeduplicationWindow: 0
                replicatedDeduplicationWindowSeconds: 0
            metricLogEnabled: false
            metricLogRetentionSize: 0
            metricLogRetentionTime: 0
            partLogRetentionSize: 0
            partLogRetentionTime: 0
            queryLogRetentionSize: 0
            queryLogRetentionTime: 0
            queryThreadLogEnabled: false
            queryThreadLogRetentionSize: 0
            queryThreadLogRetentionTime: 0
            rabbitmq:
                password: string
                username: string
            textLogEnabled: false
            textLogLevel: string
            textLogRetentionSize: 0
            textLogRetentionTime: 0
            timezone: string
            traceLogEnabled: false
            traceLogRetentionSize: 0
            traceLogRetentionTime: 0
            uncompressedCacheSize: 0
        resources:
            diskSize: 0
            diskTypeId: string
            resourcePresetId: string
    cloudStorage:
        enabled: false
    copySchemaOnNewHosts: false
    databases:
        - name: string
    deletionProtection: false
    description: string
    environment: string
    folderId: string
    formatSchemas:
        - name: string
          type: string
          uri: string
    hosts:
        - assignPublicIp: false
          fqdn: string
          shardName: string
          subnetId: string
          type: string
          zone: string
    labels:
        string: string
    maintenanceWindow:
        day: string
        hour: 0
        type: string
    mlModels:
        - name: string
          type: string
          uri: string
    name: string
    networkId: string
    securityGroupIds:
        - string
    serviceAccountId: string
    shardGroups:
        - description: string
          name: string
          shardNames:
            - string
    sqlDatabaseManagement: false
    sqlUserManagement: false
    users:
        - name: string
          password: string
          permissions:
            - databaseName: string
          quotas:
            - errors: 0
              executionTime: 0
              intervalDuration: 0
              queries: 0
              readRows: 0
              resultRows: 0
          settings:
            addHttpCorsHeader: false
            allowDdl: false
            compile: false
            compileExpressions: false
            connectTimeout: 0
            countDistinctImplementation: string
            distinctOverflowMode: string
            distributedAggregationMemoryEfficient: false
            distributedDdlTaskTimeout: 0
            distributedProductMode: string
            emptyResultForAggregationByEmptySet: false
            enableHttpCompression: false
            fallbackToStaleReplicasForDistributedQueries: false
            forceIndexByDate: false
            forcePrimaryKey: false
            groupByOverflowMode: string
            groupByTwoLevelThreshold: 0
            groupByTwoLevelThresholdBytes: 0
            httpConnectionTimeout: 0
            httpHeadersProgressInterval: 0
            httpReceiveTimeout: 0
            httpSendTimeout: 0
            inputFormatDefaultsForOmittedFields: false
            inputFormatValuesInterpretExpressions: false
            insertQuorum: 0
            insertQuorumTimeout: 0
            joinOverflowMode: string
            joinUseNulls: false
            joinedSubqueryRequiresAlias: false
            lowCardinalityAllowInNativeFormat: false
            maxAstDepth: 0
            maxAstElements: 0
            maxBlockSize: 0
            maxBytesBeforeExternalGroupBy: 0
            maxBytesBeforeExternalSort: 0
            maxBytesInDistinct: 0
            maxBytesInJoin: 0
            maxBytesInSet: 0
            maxBytesToRead: 0
            maxBytesToSort: 0
            maxBytesToTransfer: 0
            maxColumnsToRead: 0
            maxExecutionTime: 0
            maxExpandedAstElements: 0
            maxInsertBlockSize: 0
            maxMemoryUsage: 0
            maxMemoryUsageForUser: 0
            maxNetworkBandwidth: 0
            maxNetworkBandwidthForUser: 0
            maxQuerySize: 0
            maxReplicaDelayForDistributedQueries: 0
            maxResultBytes: 0
            maxResultRows: 0
            maxRowsInDistinct: 0
            maxRowsInJoin: 0
            maxRowsInSet: 0
            maxRowsToGroupBy: 0
            maxRowsToRead: 0
            maxRowsToSort: 0
            maxRowsToTransfer: 0
            maxTemporaryColumns: 0
            maxTemporaryNonConstColumns: 0
            maxThreads: 0
            mergeTreeMaxBytesToUseCache: 0
            mergeTreeMaxRowsToUseCache: 0
            mergeTreeMinBytesForConcurrentRead: 0
            mergeTreeMinRowsForConcurrentRead: 0
            minBytesToUseDirectIo: 0
            minCountToCompile: 0
            minCountToCompileExpression: 0
            minExecutionSpeed: 0
            minExecutionSpeedBytes: 0
            minInsertBlockSizeBytes: 0
            minInsertBlockSizeRows: 0
            outputFormatJsonQuote64bitIntegers: false
            outputFormatJsonQuoteDenormals: false
            priority: 0
            quotaMode: string
            readOverflowMode: string
            readonly: 0
            receiveTimeout: 0
            replicationAlterPartitionsSync: 0
            resultOverflowMode: string
            selectSequentialConsistency: false
            sendProgressInHttpHeaders: false
            sendTimeout: 0
            setOverflowMode: string
            skipUnavailableShards: false
            sortOverflowMode: string
            timeoutOverflowMode: string
            transferOverflowMode: string
            transformNullIn: false
            useUncompressedCache: false
    version: string
    zookeeper:
        resources:
            diskSize: 0
            diskTypeId: string
            resourcePresetId: string
MdbClickhouseCluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
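For example, the clickhouse and hosts inputs can be written in either form. The following is a minimal Python sketch, not a complete production definition: the network and subnet IDs are placeholders, and the resource preset, disk type, and zone are illustrative values.

import pulumi_yandex as yandex

# Argument-class form (IDs are placeholders, preset/zone are illustrative):
cluster = yandex.MdbClickhouseCluster("example",
    environment="PRESTABLE",
    network_id="<your-network-id>",
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            resource_preset_id="s2.micro",
            disk_type_id="network-ssd",
            disk_size=16,
        ),
    ),
    hosts=[yandex.MdbClickhouseClusterHostArgs(
        type="CLICKHOUSE",
        zone="ru-central1-a",
        subnet_id="<your-subnet-id>",
    )],
)

# Equivalent dictionary-literal form for the same nested inputs:
cluster_dict_form = yandex.MdbClickhouseCluster("example-dict",
    environment="PRESTABLE",
    network_id="<your-network-id>",
    clickhouse={
        "resources": {
            "resource_preset_id": "s2.micro",
            "disk_type_id": "network-ssd",
            "disk_size": 16,
        },
    },
    hosts=[{
        "type": "CLICKHOUSE",
        "zone": "ru-central1-a",
        "subnet_id": "<your-subnet-id>",
    }],
)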
The MdbClickhouseCluster resource accepts the following input properties:
- Clickhouse MdbClickhouseClusterClickhouse
- Configuration of the ClickHouse subcluster. The structure is documented below.
- Environment string
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- Hosts List<MdbClickhouseClusterHost>
- A host of the ClickHouse cluster. The structure is documented below.
- NetworkId string
- ID of the network to which the ClickHouse cluster belongs.
- Access MdbClickhouseClusterAccess
- Access policy to the ClickHouse cluster. The structure is documented below.
- AdminPassword string
- A password used to authorize as user admin when sql_user_management is enabled.
- BackupWindowStart MdbClickhouseClusterBackupWindowStart
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- CloudStorage MdbClickhouseClusterCloudStorage
- CopySchemaOnNewHosts bool
- Whether to copy schema on new ClickHouse hosts.
- Databases List<MdbClickhouseClusterDatabase>
- A database of the ClickHouse cluster. The structure is documented below.
- DeletionProtection bool
- Inhibits deletion of the cluster. Can be either true or false.
- Description string
- Description of the ClickHouse cluster.
- FolderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- FormatSchemas List<MdbClickhouseClusterFormatSchema>
- A set of protobuf or capnproto format schemas. The structure is documented below.
- Labels Dictionary<string, string>
- A set of key/value label pairs to assign to the ClickHouse cluster.
- MaintenanceWindow MdbClickhouseClusterMaintenanceWindow
- MlModels List<MdbClickhouseClusterMlModel>
- A group of machine learning models. The structure is documented below.
- Name string
- Name of the ClickHouse cluster.
- SecurityGroupIds List<string>
- A set of IDs of security groups assigned to hosts of the cluster.
- ServiceAccountId string
- ID of the service account used for access to Yandex Object Storage.
- ShardGroups List<MdbClickhouseClusterShardGroup>
- A group of ClickHouse shards. The structure is documented below.
- SqlDatabaseManagement bool
- Grants admin user database management permission.
- SqlUserManagement bool
- Enables admin user with user management permission.
- Users List<MdbClickhouseClusterUser>
- A user of the ClickHouse cluster. The structure is documented below.
- Version string
- Version of the ClickHouse server software.
- Zookeeper MdbClickhouseClusterZookeeper
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- Clickhouse MdbClickhouseClusterClickhouseArgs
- Configuration of the ClickHouse subcluster. The structure is documented below.
- Environment string
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- Hosts []MdbClickhouseClusterHostArgs
- A host of the ClickHouse cluster. The structure is documented below.
- NetworkId string
- ID of the network to which the ClickHouse cluster belongs.
- Access MdbClickhouseClusterAccessArgs
- Access policy to the ClickHouse cluster. The structure is documented below.
- AdminPassword string
- A password used to authorize as user admin when sql_user_management is enabled.
- BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- CloudStorage MdbClickhouseClusterCloudStorageArgs
- CopySchemaOnNewHosts bool
- Whether to copy schema on new ClickHouse hosts.
- Databases []MdbClickhouseClusterDatabaseArgs
- A database of the ClickHouse cluster. The structure is documented below.
- DeletionProtection bool
- Inhibits deletion of the cluster. Can be either true or false.
- Description string
- Description of the ClickHouse cluster.
- FolderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- FormatSchemas []MdbClickhouseClusterFormatSchemaArgs
- A set of protobuf or capnproto format schemas. The structure is documented below.
- Labels map[string]string
- A set of key/value label pairs to assign to the ClickHouse cluster.
- MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
- MlModels []MdbClickhouseClusterMlModelArgs
- A group of machine learning models. The structure is documented below.
- Name string
- Name of the ClickHouse cluster.
- SecurityGroupIds []string
- A set of IDs of security groups assigned to hosts of the cluster.
- ServiceAccountId string
- ID of the service account used for access to Yandex Object Storage.
- ShardGroups []MdbClickhouseClusterShardGroupArgs
- A group of ClickHouse shards. The structure is documented below.
- SqlDatabaseManagement bool
- Grants admin user database management permission.
- SqlUserManagement bool
- Enables admin user with user management permission.
- Users []MdbClickhouseClusterUserArgs
- A user of the ClickHouse cluster. The structure is documented below.
- Version string
- Version of the ClickHouse server software.
- Zookeeper MdbClickhouseClusterZookeeperArgs
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouse
- Configuration of the ClickHouse subcluster. The structure is documented below.
- environment String
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- hosts List<MdbClickhouseClusterHost>
- A host of the ClickHouse cluster. The structure is documented below.
- networkId String
- ID of the network to which the ClickHouse cluster belongs.
- access MdbClickhouseClusterAccess
- Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword String
- A password used to authorize as user admin when sql_user_management is enabled.
- backupWindowStart MdbClickhouseClusterBackupWindowStart
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- cloudStorage MdbClickhouseClusterCloudStorage
- copySchemaOnNewHosts Boolean
- Whether to copy schema on new ClickHouse hosts.
- databases List<MdbClickhouseClusterDatabase>
- A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection Boolean
- Inhibits deletion of the cluster. Can be either true or false.
- description String
- Description of the ClickHouse cluster.
- folderId String
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas List<MdbClickhouseClusterFormatSchema>
- A set of protobuf or capnproto format schemas. The structure is documented below.
- labels Map<String,String>
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow MdbClickhouseClusterMaintenanceWindow
- mlModels List<MdbClickhouseClusterMlModel>
- A group of machine learning models. The structure is documented below.
- name String
- Name of the ClickHouse cluster.
- securityGroupIds List<String>
- A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId String
- ID of the service account used for access to Yandex Object Storage.
- shardGroups List<MdbClickhouseClusterShardGroup>
- A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement Boolean
- Grants admin user database management permission.
- sqlUserManagement Boolean
- Enables admin user with user management permission.
- users List<MdbClickhouseClusterUser>
- A user of the ClickHouse cluster. The structure is documented below.
- version String
- Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeper
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouse
- Configuration of the ClickHouse subcluster. The structure is documented below.
- environment string
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- hosts MdbClickhouseClusterHost[]
- A host of the ClickHouse cluster. The structure is documented below.
- networkId string
- ID of the network to which the ClickHouse cluster belongs.
- access MdbClickhouseClusterAccess
- Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword string
- A password used to authorize as user admin when sql_user_management is enabled.
- backupWindowStart MdbClickhouseClusterBackupWindowStart
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- cloudStorage MdbClickhouseClusterCloudStorage
- copySchemaOnNewHosts boolean
- Whether to copy schema on new ClickHouse hosts.
- databases MdbClickhouseClusterDatabase[]
- A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection boolean
- Inhibits deletion of the cluster. Can be either true or false.
- description string
- Description of the ClickHouse cluster.
- folderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas MdbClickhouseClusterFormatSchema[]
- A set of protobuf or capnproto format schemas. The structure is documented below.
- labels {[key: string]: string}
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow MdbClickhouseClusterMaintenanceWindow
- mlModels MdbClickhouseClusterMlModel[]
- A group of machine learning models. The structure is documented below.
- name string
- Name of the ClickHouse cluster.
- securityGroupIds string[]
- A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId string
- ID of the service account used for access to Yandex Object Storage.
- shardGroups MdbClickhouseClusterShardGroup[]
- A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement boolean
- Grants admin user database management permission.
- sqlUserManagement boolean
- Enables admin user with user management permission.
- users MdbClickhouseClusterUser[]
- A user of the ClickHouse cluster. The structure is documented below.
- version string
- Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeper
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouseArgs
- Configuration of the ClickHouse subcluster. The structure is documented below.
- environment str
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- hosts Sequence[MdbClickhouseClusterHostArgs]
- A host of the ClickHouse cluster. The structure is documented below.
- network_id str
- ID of the network to which the ClickHouse cluster belongs.
- access MdbClickhouseClusterAccessArgs
- Access policy to the ClickHouse cluster. The structure is documented below.
- admin_password str
- A password used to authorize as user admin when sql_user_management is enabled.
- backup_window_start MdbClickhouseClusterBackupWindowStartArgs
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- cloud_storage MdbClickhouseClusterCloudStorageArgs
- copy_schema_on_new_hosts bool
- Whether to copy schema on new ClickHouse hosts.
- databases Sequence[MdbClickhouseClusterDatabaseArgs]
- A database of the ClickHouse cluster. The structure is documented below.
- deletion_protection bool
- Inhibits deletion of the cluster. Can be either true or false.
- description str
- Description of the ClickHouse cluster.
- folder_id str
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- format_schemas Sequence[MdbClickhouseClusterFormatSchemaArgs]
- A set of protobuf or capnproto format schemas. The structure is documented below.
- labels Mapping[str, str]
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenance_window MdbClickhouseClusterMaintenanceWindowArgs
- ml_models Sequence[MdbClickhouseClusterMlModelArgs]
- A group of machine learning models. The structure is documented below.
- name str
- Name of the ClickHouse cluster.
- security_group_ids Sequence[str]
- A set of IDs of security groups assigned to hosts of the cluster.
- service_account_id str
- ID of the service account used for access to Yandex Object Storage.
- shard_groups Sequence[MdbClickhouseClusterShardGroupArgs]
- A group of ClickHouse shards. The structure is documented below.
- sql_database_management bool
- Grants admin user database management permission.
- sql_user_management bool
- Enables admin user with user management permission.
- users Sequence[MdbClickhouseClusterUserArgs]
- A user of the ClickHouse cluster. The structure is documented below.
- version str
- Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeperArgs
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- clickhouse Property Map
- Configuration of the ClickHouse subcluster. The structure is documented below.
- environment String
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- hosts List<Property Map>
- A host of the ClickHouse cluster. The structure is documented below.
- networkId String
- ID of the network to which the ClickHouse cluster belongs.
- access Property Map
- Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword String
- A password used to authorize as user admin when sql_user_management is enabled.
- backupWindowStart Property Map
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- cloudStorage Property Map
- copySchemaOnNewHosts Boolean
- Whether to copy schema on new ClickHouse hosts.
- databases List<Property Map>
- A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection Boolean
- Inhibits deletion of the cluster. Can be either true or false.
- description String
- Description of the ClickHouse cluster.
- folderId String
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas List<Property Map>
- A set of protobuf or capnproto format schemas. The structure is documented below.
- labels Map<String>
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow Property Map
- mlModels List<Property Map>
- A group of machine learning models. The structure is documented below.
- name String
- Name of the ClickHouse cluster.
- securityGroupIds List<String>
- A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId String
- ID of the service account used for access to Yandex Object Storage.
- shardGroups List<Property Map>
- A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement Boolean
- Grants admin user database management permission.
- sqlUserManagement Boolean
- Enables admin user with user management permission.
- users List<Property Map>
- A user of the ClickHouse cluster. The structure is documented below.
- version String
- Version of the ClickHouse server software.
- zookeeper Property Map
- Configuration of the ZooKeeper subcluster. The structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the MdbClickhouseCluster resource produces the following output properties:
- CreatedAt string
- Timestamp of cluster creation.
- Health string
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- Id string
- The provider-assigned unique ID for this managed resource.
- Status string
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- CreatedAt string
- Timestamp of cluster creation.
- Health string
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- Id string
- The provider-assigned unique ID for this managed resource.
- Status string
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- createdAt String
- Timestamp of cluster creation.
- health String
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- id String
- The provider-assigned unique ID for this managed resource.
- status String
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- createdAt string
- Timestamp of cluster creation.
- health string
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- id string
- The provider-assigned unique ID for this managed resource.
- status string
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- created_at str
- Timestamp of cluster creation.
- health str
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- id str
- The provider-assigned unique ID for this managed resource.
- status str
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- createdAt String
- Timestamp of cluster creation.
- health String
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- id String
- The provider-assigned unique ID for this managed resource.
- status String
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
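Because every input is echoed back as an output, the computed properties above can be exported like any other output. A short Python sketch, assuming cluster is an MdbClickhouseCluster resource declared earlier in the program:

import pulumi

# Computed outputs become available once the cluster has been created:
pulumi.export("cluster_id", cluster.id)
pulumi.export("created_at", cluster.created_at)
pulumi.export("health", cluster.health)    # ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN
pulumi.export("status", cluster.status)    # CREATING, RUNNING, ERROR, ...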
Look up Existing MdbClickhouseCluster Resource
Get an existing MdbClickhouseCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MdbClickhouseClusterState, opts?: CustomResourceOptions): MdbClickhouseCluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        access: Optional[MdbClickhouseClusterAccessArgs] = None,
        admin_password: Optional[str] = None,
        backup_window_start: Optional[MdbClickhouseClusterBackupWindowStartArgs] = None,
        clickhouse: Optional[MdbClickhouseClusterClickhouseArgs] = None,
        cloud_storage: Optional[MdbClickhouseClusterCloudStorageArgs] = None,
        copy_schema_on_new_hosts: Optional[bool] = None,
        created_at: Optional[str] = None,
        databases: Optional[Sequence[MdbClickhouseClusterDatabaseArgs]] = None,
        deletion_protection: Optional[bool] = None,
        description: Optional[str] = None,
        environment: Optional[str] = None,
        folder_id: Optional[str] = None,
        format_schemas: Optional[Sequence[MdbClickhouseClusterFormatSchemaArgs]] = None,
        health: Optional[str] = None,
        hosts: Optional[Sequence[MdbClickhouseClusterHostArgs]] = None,
        labels: Optional[Mapping[str, str]] = None,
        maintenance_window: Optional[MdbClickhouseClusterMaintenanceWindowArgs] = None,
        ml_models: Optional[Sequence[MdbClickhouseClusterMlModelArgs]] = None,
        name: Optional[str] = None,
        network_id: Optional[str] = None,
        security_group_ids: Optional[Sequence[str]] = None,
        service_account_id: Optional[str] = None,
        shard_groups: Optional[Sequence[MdbClickhouseClusterShardGroupArgs]] = None,
        sql_database_management: Optional[bool] = None,
        sql_user_management: Optional[bool] = None,
        status: Optional[str] = None,
        users: Optional[Sequence[MdbClickhouseClusterUserArgs]] = None,
        version: Optional[str] = None,
        zookeeper: Optional[MdbClickhouseClusterZookeeperArgs] = None) -> MdbClickhouseCluster
func GetMdbClickhouseCluster(ctx *Context, name string, id IDInput, state *MdbClickhouseClusterState, opts ...ResourceOption) (*MdbClickhouseCluster, error)
public static MdbClickhouseCluster Get(string name, Input<string> id, MdbClickhouseClusterState? state, CustomResourceOptions? opts = null)
public static MdbClickhouseCluster get(String name, Output<String> id, MdbClickhouseClusterState state, CustomResourceOptions options)
resources:
  _:
    type: yandex:MdbClickhouseCluster
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
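A brief Python sketch of such a lookup (the cluster ID shown is a placeholder); the state arguments that get accepts are listed below:

import pulumi
import pulumi_yandex as yandex

# Look up the state of an existing cluster by its provider-assigned ID:
existing = yandex.MdbClickhouseCluster.get("imported", "<existing-cluster-id>")

# State properties are then available as outputs:
pulumi.export("existing_version", existing.version)
pulumi.export("existing_health", existing.health)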
- Access MdbClickhouseClusterAccess
- Access policy to the ClickHouse cluster. The structure is documented below.
- AdminPassword string
- A password used to authorize as user admin when sql_user_management is enabled.
- BackupWindowStart MdbClickhouseClusterBackupWindowStart
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- Clickhouse MdbClickhouseClusterClickhouse
- Configuration of the ClickHouse subcluster. The structure is documented below.
- CloudStorage MdbClickhouseClusterCloudStorage
- CopySchemaOnNewHosts bool
- Whether to copy schema on new ClickHouse hosts.
- CreatedAt string
- Timestamp of cluster creation.
- Databases List<MdbClickhouseClusterDatabase>
- A database of the ClickHouse cluster. The structure is documented below.
- DeletionProtection bool
- Inhibits deletion of the cluster. Can be either true or false.
- Description string
- Description of the ClickHouse cluster.
- Environment string
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- FolderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- FormatSchemas List<MdbClickhouseClusterFormatSchema>
- A set of protobuf or capnproto format schemas. The structure is documented below.
- Health string
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- Hosts List<MdbClickhouseClusterHost>
- A host of the ClickHouse cluster. The structure is documented below.
- Labels Dictionary<string, string>
- A set of key/value label pairs to assign to the ClickHouse cluster.
- MaintenanceWindow MdbClickhouseClusterMaintenanceWindow
- MlModels List<MdbClickhouseClusterMlModel>
- A group of machine learning models. The structure is documented below.
- Name string
- Name of the ClickHouse cluster.
- NetworkId string
- ID of the network to which the ClickHouse cluster belongs.
- SecurityGroupIds List<string>
- A set of IDs of security groups assigned to hosts of the cluster.
- ServiceAccountId string
- ID of the service account used for access to Yandex Object Storage.
- ShardGroups List<MdbClickhouseClusterShardGroup>
- A group of ClickHouse shards. The structure is documented below.
- SqlDatabaseManagement bool
- Grants admin user database management permission.
- SqlUserManagement bool
- Enables admin user with user management permission.
- Status string
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- Users List<MdbClickhouseClusterUser>
- A user of the ClickHouse cluster. The structure is documented below.
- Version string
- Version of the ClickHouse server software.
- Zookeeper MdbClickhouseClusterZookeeper
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- Access MdbClickhouseClusterAccessArgs
- Access policy to the ClickHouse cluster. The structure is documented below.
- AdminPassword string
- A password used to authorize as user admin when sql_user_management is enabled.
- BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- Clickhouse MdbClickhouseClusterClickhouseArgs
- Configuration of the ClickHouse subcluster. The structure is documented below.
- CloudStorage MdbClickhouseClusterCloudStorageArgs
- CopySchemaOnNewHosts bool
- Whether to copy schema on new ClickHouse hosts.
- CreatedAt string
- Timestamp of cluster creation.
- Databases []MdbClickhouseClusterDatabaseArgs
- A database of the ClickHouse cluster. The structure is documented below.
- DeletionProtection bool
- Inhibits deletion of the cluster. Can be either true or false.
- Description string
- Description of the ClickHouse cluster.
- Environment string
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- FolderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- FormatSchemas []MdbClickhouseClusterFormatSchemaArgs
- A set of protobuf or capnproto format schemas. The structure is documented below.
- Health string
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- Hosts []MdbClickhouseClusterHostArgs
- A host of the ClickHouse cluster. The structure is documented below.
- Labels map[string]string
- A set of key/value label pairs to assign to the ClickHouse cluster.
- MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
- MlModels []MdbClickhouseClusterMlModelArgs
- A group of machine learning models. The structure is documented below.
- Name string
- Name of the ClickHouse cluster.
- NetworkId string
- ID of the network to which the ClickHouse cluster belongs.
- SecurityGroupIds []string
- A set of IDs of security groups assigned to hosts of the cluster.
- ServiceAccountId string
- ID of the service account used for access to Yandex Object Storage.
- ShardGroups []MdbClickhouseClusterShardGroupArgs
- A group of ClickHouse shards. The structure is documented below.
- SqlDatabaseManagement bool
- Grants admin user database management permission.
- SqlUserManagement bool
- Enables admin user with user management permission.
- Status string
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- Users []MdbClickhouseClusterUserArgs
- A user of the ClickHouse cluster. The structure is documented below.
- Version string
- Version of the ClickHouse server software.
- Zookeeper MdbClickhouseClusterZookeeperArgs
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- access MdbClickhouseClusterAccess
- Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword String
- A password used to authorize as user admin when sql_user_management is enabled.
- backupWindowStart MdbClickhouseClusterBackupWindowStart
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouse
- Configuration of the ClickHouse subcluster. The structure is documented below.
- cloudStorage MdbClickhouseClusterCloudStorage
- copySchemaOnNewHosts Boolean
- Whether to copy schema on new ClickHouse hosts.
- createdAt String
- Timestamp of cluster creation.
- databases List<MdbClickhouseClusterDatabase>
- A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection Boolean
- Inhibits deletion of the cluster. Can be either true or false.
- description String
- Description of the ClickHouse cluster.
- environment String
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- folderId String
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas List<MdbClickhouseClusterFormatSchema>
- A set of protobuf or capnproto format schemas. The structure is documented below.
- health String
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- hosts List<MdbClickhouseClusterHost>
- A host of the ClickHouse cluster. The structure is documented below.
- labels Map<String,String>
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow MdbClickhouseClusterMaintenanceWindow
- mlModels List<MdbClickhouseClusterMlModel>
- A group of machine learning models. The structure is documented below.
- name String
- Name of the ClickHouse cluster.
- networkId String
- ID of the network to which the ClickHouse cluster belongs.
- securityGroupIds List<String>
- A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId String
- ID of the service account used for access to Yandex Object Storage.
- shardGroups List<MdbClickhouseClusterShardGroup>
- A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement Boolean
- Grants admin user database management permission.
- sqlUserManagement Boolean
- Enables admin user with user management permission.
- status String
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- users List<MdbClickhouseClusterUser>
- A user of the ClickHouse cluster. The structure is documented below.
- version String
- Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeper
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- access MdbClickhouseClusterAccess
- Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword string
- A password used to authorize as user admin when sql_user_management is enabled.
- backupWindowStart MdbClickhouseClusterBackupWindowStart
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouse
- Configuration of the ClickHouse subcluster. The structure is documented below.
- cloudStorage MdbClickhouseClusterCloudStorage
- copySchemaOnNewHosts boolean
- Whether to copy schema on new ClickHouse hosts.
- createdAt string
- Timestamp of cluster creation.
- databases MdbClickhouseClusterDatabase[]
- A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection boolean
- Inhibits deletion of the cluster. Can be either true or false.
- description string
- Description of the ClickHouse cluster.
- environment string
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- folderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas MdbClickhouseClusterFormatSchema[]
- A set of protobuf or capnproto format schemas. The structure is documented below.
- health string
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- hosts MdbClickhouseClusterHost[]
- A host of the ClickHouse cluster. The structure is documented below.
- labels {[key: string]: string}
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow MdbClickhouseClusterMaintenanceWindow
- mlModels MdbClickhouseClusterMlModel[]
- A group of machine learning models. The structure is documented below.
- name string
- Name of the ClickHouse cluster.
- networkId string
- ID of the network to which the ClickHouse cluster belongs.
- securityGroupIds string[]
- A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId string
- ID of the service account used for access to Yandex Object Storage.
- shardGroups MdbClickhouseClusterShardGroup[]
- A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement boolean
- Grants admin user database management permission.
- sqlUserManagement boolean
- Enables admin user with user management permission.
- status string
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- users MdbClickhouseClusterUser[]
- A user of the ClickHouse cluster. The structure is documented below.
- version string
- Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeper
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- access MdbClickhouseClusterAccessArgs
- Access policy to the ClickHouse cluster. The structure is documented below.
- admin_password str
- A password used to authorize as user admin when sql_user_management is enabled.
- backup_window_start MdbClickhouseClusterBackupWindowStartArgs
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- clickhouse MdbClickhouseClusterClickhouseArgs
- Configuration of the ClickHouse subcluster. The structure is documented below.
- cloud_storage MdbClickhouseClusterCloudStorageArgs
- copy_schema_on_new_hosts bool
- Whether to copy schema on new ClickHouse hosts.
- created_at str
- Timestamp of cluster creation.
- databases Sequence[MdbClickhouseClusterDatabaseArgs]
- A database of the ClickHouse cluster. The structure is documented below.
- deletion_protection bool
- Inhibits deletion of the cluster. Can be either true or false.
- description str
- Description of the ClickHouse cluster.
- environment str
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- folder_id str
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- format_schemas Sequence[MdbClickhouseClusterFormatSchemaArgs]
- A set of protobuf or capnproto format schemas. The structure is documented below.
- health str
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- hosts Sequence[MdbClickhouseClusterHostArgs]
- A host of the ClickHouse cluster. The structure is documented below.
- labels Mapping[str, str]
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenance_window MdbClickhouseClusterMaintenanceWindowArgs
- ml_models Sequence[MdbClickhouseClusterMlModelArgs]
- A group of machine learning models. The structure is documented below.
- name str
- Name of the ClickHouse cluster.
- network_id str
- ID of the network to which the ClickHouse cluster belongs.
- security_group_ids Sequence[str]
- A set of IDs of security groups assigned to hosts of the cluster.
- service_account_id str
- ID of the service account used for access to Yandex Object Storage.
- shard_groups Sequence[MdbClickhouseClusterShardGroupArgs]
- A group of ClickHouse shards. The structure is documented below.
- sql_database_management bool
- Grants admin user database management permission.
- sql_user_management bool
- Enables admin user with user management permission.
- status str
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- users Sequence[MdbClickhouseClusterUserArgs]
- A user of the ClickHouse cluster. The structure is documented below.
- version str
- Version of the ClickHouse server software.
- zookeeper MdbClickhouseClusterZookeeperArgs
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- access Property Map
- Access policy to the ClickHouse cluster. The structure is documented below.
- adminPassword String
- A password used to authorize as user admin when sql_user_management is enabled.
- backupWindowStart Property Map
- Time to start the daily backup, in the UTC timezone. The structure is documented below.
- clickhouse Property Map
- Configuration of the ClickHouse subcluster. The structure is documented below.
- cloudStorage Property Map
- copySchemaOnNewHosts Boolean
- Whether to copy schema on new ClickHouse hosts.
- createdAt String
- Timestamp of cluster creation.
- databases List<Property Map>
- A database of the ClickHouse cluster. The structure is documented below.
- deletionProtection Boolean
- Inhibits deletion of the cluster. Can be either true or false.
- description String
- Description of the ClickHouse cluster.
- environment String
- Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
- folderId String
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- formatSchemas List<Property Map>
- A set of protobuf or capnproto format schemas. The structure is documented below.
- health String
- Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information, see the health field of the JSON representation in the official documentation.
- hosts List<Property Map>
- A host of the ClickHouse cluster. The structure is documented below.
- labels Map<String>
- A set of key/value label pairs to assign to the ClickHouse cluster.
- maintenanceWindow Property Map
- mlModels List<Property Map>
- A group of machine learning models. The structure is documented below.
- name String
- Name of the ClickHouse cluster.
- networkId String
- ID of the network to which the ClickHouse cluster belongs.
- securityGroupIds List<String>
- A set of IDs of security groups assigned to hosts of the cluster.
- serviceAccountId String
- ID of the service account used for access to Yandex Object Storage.
- shardGroups List<Property Map>
- A group of ClickHouse shards. The structure is documented below.
- sqlDatabaseManagement Boolean
- Grants admin user database management permission.
- sqlUserManagement Boolean
- Enables admin user with user management permission.
- status String
- Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information, see the status field of the JSON representation in the official documentation.
- users List<Property Map>
- A user of the ClickHouse cluster. The structure is documented below.
- version String
- Version of the ClickHouse server software.
- zookeeper Property Map
- Configuration of the ZooKeeper subcluster. The structure is documented below.
Supporting Types
MdbClickhouseClusterAccess, MdbClickhouseClusterAccessArgs        
- DataLens bool
- Allow access for DataLens. Can be either true or false.
- Metrika bool
- Allow access for Yandex.Metrika. Can be either true or false.
- Serverless bool
- Allow access for Serverless. Can be either true or false.
- WebSql bool
- Allow access for Web SQL. Can be either true or false.
- DataLens bool
- Allow access for DataLens. Can be either true or false.
- Metrika bool
- Allow access for Yandex.Metrika. Can be either true or false.
- Serverless bool
- Allow access for Serverless. Can be either true or false.
- WebSql bool
- Allow access for Web SQL. Can be either true or false.
- dataLens Boolean
- Allow access for DataLens. Can be either true or false.
- metrika Boolean
- Allow access for Yandex.Metrika. Can be either true or false.
- serverless Boolean
- Allow access for Serverless. Can be either true or false.
- webSql Boolean
- Allow access for Web SQL. Can be either true or false.
- dataLens boolean
- Allow access for DataLens. Can be either true or false.
- metrika boolean
- Allow access for Yandex.Metrika. Can be either true or false.
- serverless boolean
- Allow access for Serverless. Can be either true or false.
- webSql boolean
- Allow access for Web SQL. Can be either true or false.
- data_lens bool
- Allow access for DataLens. Can be either true or false.
- metrika bool
- Allow access for Yandex.Metrika. Can be either true or false.
- serverless bool
- Allow access for Serverless. Can be either true or false.
- web_sql bool
- Allow access for Web SQL. Can be either true or false.
- dataLens Boolean
- Allow access for DataLens. Can be either true or false.
- metrika Boolean
- Allow access for Yandex.Metrika. Can be either true or false.
- serverless Boolean
- Allow access for Serverless. Can be either true or false.
- webSql Boolean
- Allow access for Web SQL. Can be either true or false.
MdbClickhouseClusterBackupWindowStart, MdbClickhouseClusterBackupWindowStartArgs            
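As the constructor syntax above shows, this type holds the hours and minutes of the daily backup start time in UTC. A minimal Python sketch; the 03:30 window is an illustrative choice:

import pulumi_yandex as yandex

# Start the daily backup at 03:30 UTC (illustrative window):
backup_window_start = yandex.MdbClickhouseClusterBackupWindowStartArgs(
    hours=3,
    minutes=30,
)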
MdbClickhouseClusterClickhouse, MdbClickhouseClusterClickhouseArgs        
- Resources MdbClickhouseClusterClickhouseResources
- Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
- Config MdbClickhouseClusterClickhouseConfig
- Main ClickHouse cluster configuration.
- Resources MdbClickhouseClusterClickhouseResources
- Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
- Config MdbClickhouseClusterClickhouseConfig
- Main ClickHouse cluster configuration.
- resources MdbClickhouseClusterClickhouseResources
- Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
- config MdbClickhouseClusterClickhouseConfig
- Main ClickHouse cluster configuration.
- resources MdbClickhouseClusterClickhouseResources
- Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
- config MdbClickhouseClusterClickhouseConfig
- Main ClickHouse cluster configuration.
- resources MdbClickhouseClusterClickhouseResources
- Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
- config MdbClickhouseClusterClickhouseConfig
- Main ClickHouse cluster configuration.
- resources Property Map
- Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
- config Property Map
- Main ClickHouse cluster configuration.
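A hedged Python sketch combining both fields of this type; the preset, disk size, and config values are illustrative, not defaults:

import pulumi_yandex as yandex

# Illustrative clickhouse block: host resources plus a small slice of the
# server configuration whose fields are documented below.
clickhouse = yandex.MdbClickhouseClusterClickhouseArgs(
    resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
        resource_preset_id="s2.micro",   # illustrative preset
        disk_type_id="network-ssd",
        disk_size=32,
    ),
    config=yandex.MdbClickhouseClusterClickhouseConfigArgs(
        log_level="INFORMATION",         # illustrative log level
        max_connections=1024,
        timezone="UTC",
    ),
)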
MdbClickhouseClusterClickhouseConfig, MdbClickhouseClusterClickhouseConfigArgs          
- BackgroundPoolSize int
- BackgroundSchedulePoolSize int
- Compressions List<MdbClickhouseClusterClickhouseConfigCompression>
- Data compression configuration. The structure is documented below.
- GeobaseUri string
- GraphiteRollups List<MdbClickhouseClusterClickhouseConfigGraphiteRollup>
- Graphite rollup configuration. The structure is documented below.
- Kafka MdbClickhouseClusterClickhouseConfigKafka
- Kafka connection configuration. The structure is documented below.
- KafkaTopics List<MdbClickhouseClusterClickhouseConfigKafkaTopic>
- Kafka topic connection configuration. The structure is documented below.
- KeepAliveTimeout int
- LogLevel string
- MarkCacheSize int
- MaxConcurrentQueries int
- MaxConnections int
- MaxPartitionSizeToDrop int
- MaxTableSizeToDrop int
- MergeTree MdbClickhouseClusterClickhouseConfigMergeTree
- MergeTree engine configuration. The structure is documented below.
- MetricLogEnabled bool
- MetricLogRetentionSize int
- MetricLogRetentionTime int
- PartLogRetentionSize int
- PartLogRetentionTime int
- QueryLogRetentionSize int
- QueryLogRetentionTime int
- QueryThreadLogEnabled bool
- QueryThreadLogRetentionSize int
- QueryThreadLogRetentionTime int
- Rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq
- RabbitMQ connection configuration. The structure is documented below.
- TextLogEnabled bool
- TextLogLevel string
- TextLogRetentionSize int
- TextLogRetentionTime int
- Timezone string
- TraceLogEnabled bool
- TraceLogRetentionSize int
- TraceLogRetentionTime int
- UncompressedCacheSize int
- BackgroundPoolSize int
- BackgroundSchedulePoolSize int
- Compressions []MdbClickhouseClusterClickhouseConfigCompression
- Data compression configuration. The structure is documented below.
- GeobaseUri string
- GraphiteRollups []MdbClickhouseClusterClickhouseConfigGraphiteRollup
- Graphite rollup configuration. The structure is documented below.
- Kafka MdbClickhouseClusterClickhouseConfigKafka
- Kafka connection configuration. The structure is documented below.
- KafkaTopics []MdbClickhouseClusterClickhouseConfigKafkaTopic
- Kafka topic connection configuration. The structure is documented below.
- KeepAliveTimeout int
- LogLevel string
- MarkCacheSize int
- MaxConcurrentQueries int
- MaxConnections int
- MaxPartitionSizeToDrop int
- MaxTableSizeToDrop int
- MergeTree MdbClickhouseClusterClickhouseConfigMergeTree
- MergeTree engine configuration. The structure is documented below.
- MetricLogEnabled bool
- MetricLogRetentionSize int
- MetricLogRetentionTime int
- PartLogRetentionSize int
- PartLogRetentionTime int
- QueryLogRetentionSize int
- QueryLogRetentionTime int
- QueryThreadLogEnabled bool
- QueryThreadLogRetentionSize int
- QueryThreadLogRetentionTime int
- Rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq
- RabbitMQ connection configuration. The structure is documented below.
- TextLogEnabled bool
- TextLogLevel string
- TextLogRetentionSize int
- TextLogRetentionTime int
- Timezone string
- TraceLogEnabled bool
- TraceLogRetentionSize int
- TraceLogRetentionTime int
- UncompressedCacheSize int
- backgroundPoolSize Integer
- backgroundSchedulePoolSize Integer
- compressions
List<MdbClickhouse Cluster Clickhouse Config Compression>
- Data compression configuration. The structure is documented below.
- geobaseUri String
- graphiteRollups List<MdbClickhouse Cluster Clickhouse Config Graphite Rollup>
- Graphite rollup configuration. The structure is documented below.
- kafka
MdbClickhouse Cluster Clickhouse Config Kafka
- Kafka connection configuration. The structure is documented below.
- kafkaTopics List<MdbClickhouse Cluster Clickhouse Config Kafka Topic>
- Kafka topic connection configuration. The structure is documented below.
- keepAliveTimeout Integer
- logLevel String
- markCacheSize Integer
- maxConcurrentQueries Integer
- maxConnections Integer
- maxPartitionSizeToDrop Integer
- maxTableSizeToDrop Integer
- mergeTree MdbClickhouse Cluster Clickhouse Config Merge Tree
- MergeTree engine configuration. The structure is documented below.
- metricLogEnabled Boolean
- metricLogRetentionSize Integer
- metricLogRetentionTime Integer
- partLogRetentionSize Integer
- partLogRetentionTime Integer
- queryLogRetentionSize Integer
- queryLogRetentionTime Integer
- queryThreadLogEnabled Boolean
- queryThreadLogRetentionSize Integer
- queryThreadLogRetentionTime Integer
- rabbitmq
MdbClickhouse Cluster Clickhouse Config Rabbitmq
- RabbitMQ connection configuration. The structure is documented below.
- textLogEnabled Boolean
- textLogLevel String
- textLogRetentionSize Integer
- textLogRetentionTime Integer
- timezone String
- traceLogEnabled Boolean
- traceLogRetentionSize Integer
- traceLogRetentionTime Integer
- uncompressedCacheSize Integer
- backgroundPoolSize number
- backgroundSchedulePoolSize number
- compressions
MdbClickhouse Cluster Clickhouse Config Compression[]
- Data compression configuration. The structure is documented below.
- geobaseUri string
- graphiteRollups MdbClickhouse Cluster Clickhouse Config Graphite Rollup[]
- Graphite rollup configuration. The structure is documented below.
- kafka
MdbClickhouse Cluster Clickhouse Config Kafka
- Kafka connection configuration. The structure is documented below.
- kafkaTopics MdbClickhouse Cluster Clickhouse Config Kafka Topic[]
- Kafka topic connection configuration. The structure is documented below.
- keepAliveTimeout number
- logLevel string
- markCacheSize number
- maxConcurrentQueries number
- maxConnections number
- maxPartitionSizeToDrop number
- maxTableSizeToDrop number
- mergeTree MdbClickhouse Cluster Clickhouse Config Merge Tree
- MergeTree engine configuration. The structure is documented below.
- metricLogEnabled boolean
- metricLogRetentionSize number
- metricLogRetentionTime number
- partLogRetentionSize number
- partLogRetentionTime number
- queryLogRetentionSize number
- queryLogRetentionTime number
- queryThreadLogEnabled boolean
- queryThreadLogRetentionSize number
- queryThreadLogRetentionTime number
- rabbitmq
MdbClickhouse Cluster Clickhouse Config Rabbitmq
- RabbitMQ connection configuration. The structure is documented below.
- textLogEnabled boolean
- textLogLevel string
- textLogRetentionSize number
- textLogRetentionTime number
- timezone string
- traceLogEnabled boolean
- traceLogRetentionSize number
- traceLogRetentionTime number
- uncompressedCacheSize number
- background_pool_size int
- background_schedule_pool_size int
- compressions
Sequence[MdbClickhouse Cluster Clickhouse Config Compression]
- Data compression configuration. The structure is documented below.
- geobase_uri str
- graphite_rollups Sequence[MdbClickhouse Cluster Clickhouse Config Graphite Rollup]
- Graphite rollup configuration. The structure is documented below.
- kafka
MdbClickhouse Cluster Clickhouse Config Kafka
- Kafka connection configuration. The structure is documented below.
- kafka_topics Sequence[MdbClickhouse Cluster Clickhouse Config Kafka Topic]
- Kafka topic connection configuration. The structure is documented below.
- keep_alive_timeout int
- log_level str
- mark_cache_size int
- max_concurrent_queries int
- max_connections int
- max_partition_size_to_drop int
- max_table_size_to_drop int
- merge_tree MdbClickhouse Cluster Clickhouse Config Merge Tree
- MergeTree engine configuration. The structure is documented below.
- metric_log_enabled bool
- metric_log_retention_size int
- metric_log_retention_time int
- part_log_retention_size int
- part_log_retention_time int
- query_log_retention_size int
- query_log_retention_time int
- query_thread_log_enabled bool
- query_thread_log_retention_size int
- query_thread_log_retention_time int
- rabbitmq
MdbClickhouse Cluster Clickhouse Config Rabbitmq
- RabbitMQ connection configuration. The structure is documented below.
- text_log_enabled bool
- text_log_level str
- text_log_retention_size int
- text_log_retention_time int
- timezone str
- trace_log_enabled bool
- trace_log_retention_size int
- trace_log_retention_time int
- uncompressed_cache_size int
- backgroundPoolSize Number
- backgroundSchedulePoolSize Number
- compressions List<Property Map>
- Data compression configuration. The structure is documented below.
- geobaseUri String
- graphiteRollups List<Property Map>
- Graphite rollup configuration. The structure is documented below.
- kafka Property Map
- Kafka connection configuration. The structure is documented below.
- kafkaTopics List<Property Map>
- Kafka topic connection configuration. The structure is documented below.
- keepAliveTimeout Number
- logLevel String
- markCacheSize Number
- maxConcurrentQueries Number
- maxConnections Number
- maxPartitionSizeToDrop Number
- maxTableSizeToDrop Number
- mergeTree Property Map
- MergeTree engine configuration. The structure is documented below.
- metricLogEnabled Boolean
- metricLogRetentionSize Number
- metricLogRetentionTime Number
- partLogRetentionSize Number
- partLogRetentionTime Number
- queryLogRetentionSize Number
- queryLogRetentionTime Number
- queryThreadLogEnabled Boolean
- queryThreadLogRetentionSize Number
- queryThreadLogRetentionTime Number
- rabbitmq Property Map
- RabbitMQ connection configuration. The structure is documented below.
- textLogEnabled Boolean
- textLogLevel String
- textLogRetentionSize Number
- textLogRetentionTime Number
- timezone String
- traceLogEnabled Boolean
- traceLogRetentionSize Number
- traceLogRetentionTime Number
- uncompressedCacheSize Number
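As a concrete illustration, the scalar settings above slot into the Config block of the C# example at the top of this page. A minimal sketch, with illustrative values rather than recommendations:
Config = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigArgs
{
    LogLevel = "INFORMATION",  // one of the provider's log level constants
    MaxConnections = 100,
    MaxConcurrentQueries = 50,
    KeepAliveTimeout = 3000,   // milliseconds
    Timezone = "UTC",
},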
MdbClickhouseClusterClickhouseConfigCompression, MdbClickhouseClusterClickhouseConfigCompressionArgs            
- Method string
- Method: Compression method. Two methods are available: LZ4 and zstd.
- MinPartSize int
- Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
- MinPartSizeRatio double
- Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
- Method string
- Method: Compression method. Two methods are available: LZ4 and zstd.
- MinPartSize int
- Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
- MinPartSizeRatio float64
- Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
- method String
- Method: Compression method. Two methods are available: LZ4 and zstd.
- minPartSize Integer
- Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
- minPartSizeRatio Double
- Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
- method string
- Method: Compression method. Two methods are available: LZ4 and zstd.
- minPartSize number
- Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
- minPartSizeRatio number
- Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
- method str
- Method: Compression method. Two methods are available: LZ4 and zstd.
- min_part_size int
- Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
- min_part_size_ratio float
- Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
- method String
- Method: Compression method. Two methods are available: LZ4 and zstd.
- minPartSize Number
- Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
- minPartSizeRatio Number
- Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
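In typed C# form, one compression rule looks as follows; a sketch using the property names listed above, where the rule applies only to parts of at least MinPartSize bytes whose part-to-table size ratio meets MinPartSizeRatio:
Compressions =
{
    new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigCompressionArgs
    {
        Method = "LZ4",        // or "ZSTD"
        MinPartSize = 1024,    // bytes
        MinPartSizeRatio = 0.5,
    },
},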
MdbClickhouseClusterClickhouseConfigGraphiteRollup, MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs              
- Name string
- Graphite rollup configuration name.
- Patterns
List<MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern> 
- Set of thinning rules.
- Name string
- Graphite rollup configuration name.
- Patterns
[]MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern 
- Set of thinning rules.
- name String
- Graphite rollup configuration name.
- patterns
List<MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern> 
- Set of thinning rules.
- name string
- Graphite rollup configuration name.
- patterns
MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern[] 
- Set of thinning rules.
- name str
- Graphite rollup configuration name.
- patterns
Sequence[MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern] 
- Set of thinning rules.
- name String
- Graphite rollup configuration name.
- patterns List<Property Map>
- Set of thinning rules.
MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern, MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs                
- Function string
- Aggregation function name.
- Regexp string
- Regular expression that the metric name must match.
- Retentions
List<MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern Retention> 
- Retain parameters.
- Function string
- Aggregation function name.
- Regexp string
- Regular expression that the metric name must match.
- Retentions
[]MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern Retention 
- Retain parameters.
- function String
- Aggregation function name.
- regexp String
- Regular expression that the metric name must match.
- retentions
List<MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern Retention> 
- Retain parameters.
- function string
- Aggregation function name.
- regexp string
- Regular expression that the metric name must match.
- retentions
MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern Retention[] 
- Retain parameters.
- function str
- Aggregation function name.
- regexp str
- Regular expression that the metric name must match.
- retentions
Sequence[MdbClickhouse Cluster Clickhouse Config Graphite Rollup Pattern Retention] 
- Retain parameters.
- function String
- Aggregation function name.
- regexp String
- Regular expression that the metric name must match.
- retentions List<Property Map>
- Retain parameters.
MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention, MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs                  
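Putting the three rollup types together, a single rule might look like the sketch below. The name, regexp, and numeric values are hypothetical; each retention entry pairs an age with a precision, as in the example at the top of this page:
GraphiteRollups =
{
    new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
    {
        Name = "rollup_example",  // hypothetical rollup name
        Patterns =
        {
            new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
            {
                Function = "avg",
                Regexp = "^metrics\\.",
                Retentions =
                {
                    new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
                    {
                        Age = 3600,     // illustrative age
                        Precision = 60, // illustrative precision
                    },
                },
            },
        },
    },
},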
MdbClickhouseClusterClickhouseConfigKafka, MdbClickhouseClusterClickhouseConfigKafkaArgs            
- SaslMechanism string
- SASL mechanism used in Kafka authentication.
- SaslPassword string
- User password on Kafka server.
- SaslUsername string
- Username on Kafka server.
- SecurityProtocol string
- Security protocol used to connect to Kafka server.
- SaslMechanism string
- SASL mechanism used in Kafka authentication.
- SaslPassword string
- User password on Kafka server.
- SaslUsername string
- Username on Kafka server.
- SecurityProtocol string
- Security protocol used to connect to Kafka server.
- saslMechanism String
- SASL mechanism used in Kafka authentication.
- saslPassword String
- User password on Kafka server.
- saslUsername String
- Username on Kafka server.
- securityProtocol String
- Security protocol used to connect to Kafka server.
- saslMechanism string
- SASL mechanism used in Kafka authentication.
- saslPassword string
- User password on Kafka server.
- saslUsername string
- Username on Kafka server.
- securityProtocol string
- Security protocol used to connect to Kafka server.
- sasl_mechanism str
- SASL mechanism used in Kafka authentication.
- sasl_password str
- User password on Kafka server.
- sasl_username str
- Username on Kafka server.
- security_protocol str
- Security protocol used to connect to Kafka server.
- saslMechanism String
- SASL mechanism used in Kafka authentication.
- saslPassword String
- User password on Kafka server.
- saslUsername String
- Username on Kafka server.
- securityProtocol String
- Security protocol used to connect to Kafka server.
MdbClickhouseClusterClickhouseConfigKafkaTopic, MdbClickhouseClusterClickhouseConfigKafkaTopicArgs              
- Name string
- Kafka topic name.
- Settings
MdbClickhouse Cluster Clickhouse Config Kafka Topic Settings
- Kafka connection settings, same as the kafka block.
- Name string
- Kafka topic name.
- Settings
MdbClickhouse Cluster Clickhouse Config Kafka Topic Settings
- Kafka connection settings, same as the kafka block.
- name String
- Kafka topic name.
- settings
MdbClickhouse Cluster Clickhouse Config Kafka Topic Settings
- Kafka connection settings, same as the kafka block.
- name string
- Kafka topic name.
- settings
MdbClickhouse Cluster Clickhouse Config Kafka Topic Settings
- Kafka connection settings, same as the kafka block.
- name str
- Kafka topic name.
- settings
MdbClickhouse Cluster Clickhouse Config Kafka Topic Settings
- Kafka connection settings, same as the kafka block.
- name String
- Kafka topic name.
- settings Property Map
- Kafka connection settings, same as the kafka block.
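Per-topic settings override the cluster-wide kafka block for that topic only. A sketch with a hypothetical topic name and credentials; the SASL mechanism and security protocol strings follow the enum pattern shown in the example at the top of this page, so verify them against the provider schema:
KafkaTopics =
{
    new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
    {
        Name = "events",  // hypothetical topic
        Settings = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
        {
            SaslMechanism = "SASL_MECHANISM_SCRAM_SHA_256",   // assumed enum spelling
            SecurityProtocol = "SECURITY_PROTOCOL_SASL_SSL",  // assumed enum spelling
            SaslUsername = "reader",                          // hypothetical account
            SaslPassword = "your-password",
        },
    },
},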
MdbClickhouseClusterClickhouseConfigKafkaTopicSettings, MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs                
- SaslMechanism string
- SASL mechanism used in Kafka authentication.
- SaslPassword string
- User password on Kafka server.
- SaslUsername string
- Username on Kafka server.
- SecurityProtocol string
- Security protocol used to connect to Kafka server.
- SaslMechanism string
- SASL mechanism used in Kafka authentication.
- SaslPassword string
- User password on Kafka server.
- SaslUsername string
- Username on Kafka server.
- SecurityProtocol string
- Security protocol used to connect to Kafka server.
- saslMechanism String
- SASL mechanism used in Kafka authentication.
- saslPassword String
- User password on Kafka server.
- saslUsername String
- Username on Kafka server.
- securityProtocol String
- Security protocol used to connect to Kafka server.
- saslMechanism string
- SASL mechanism used in Kafka authentication.
- saslPassword string
- User password on Kafka server.
- saslUsername string
- Username on Kafka server.
- securityProtocol string
- Security protocol used to connect to Kafka server.
- sasl_mechanism str
- SASL mechanism used in Kafka authentication.
- sasl_password str
- User password on Kafka server.
- sasl_username str
- Username on Kafka server.
- security_protocol str
- Security protocol used to connect to Kafka server.
- saslMechanism String
- SASL mechanism used in Kafka authentication.
- saslPassword String
- User password on Kafka server.
- saslUsername String
- Username on Kafka server.
- securityProtocol String
- Security protocol used to connect to Kafka server.
MdbClickhouseClusterClickhouseConfigMergeTree, MdbClickhouseClusterClickhouseConfigMergeTreeArgs              
- MaxBytesToMergeAtMinSpaceInPool int
- Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is at its minimum.
- MaxReplicatedMergesInQueue int
- Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
- NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge int
- Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
- PartsToDelayInsert int
- Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
- PartsToThrowInsert int
- Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
- ReplicatedDeduplicationWindow int
- Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
- ReplicatedDeduplicationWindowSeconds int
- Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
- MaxBytesToMergeAtMinSpaceInPool int
- Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is at its minimum.
- MaxReplicatedMergesInQueue int
- Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
- NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge int
- Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
- PartsToDelayInsert int
- Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
- PartsToThrowInsert int
- Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
- ReplicatedDeduplicationWindow int
- Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
- ReplicatedDeduplicationWindowSeconds int
- Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
- maxBytesToMergeAtMinSpaceInPool Integer
- Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is at its minimum.
- maxReplicatedMergesInQueue Integer
- Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
- numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge Integer
- Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
- partsToDelayInsert Integer
- Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
- partsToThrowInsert Integer
- Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
- replicatedDeduplicationWindow Integer
- Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
- replicatedDeduplicationWindowSeconds Integer
- Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
- maxBytesToMergeAtMinSpaceInPool number
- Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is at its minimum.
- maxReplicatedMergesInQueue number
- Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
- numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge number
- Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
- partsToDelayInsert number
- Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
- partsToThrowInsert number
- Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
- replicatedDeduplicationWindow number
- Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
- replicatedDeduplicationWindowSeconds number
- Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
- max_bytes_to_merge_at_min_space_in_pool int
- Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is at its minimum.
- max_replicated_merges_in_queue int
- Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
- number_of_free_entries_in_pool_to_lower_max_size_of_merge int
- Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
- parts_to_delay_insert int
- Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
- parts_to_throw_insert int
- Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
- replicated_deduplication_window int
- Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
- replicated_deduplication_window_seconds int
- Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
- maxBytesToMergeAtMinSpaceInPool Number
- Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is at its minimum.
- maxReplicatedMergesInQueue Number
- Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
- numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge Number
- Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
- partsToDelayInsert Number
- Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
- partsToThrowInsert Number
- Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
- replicatedDeduplicationWindow Number
- Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
- replicatedDeduplicationWindowSeconds Number
- Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
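For example, the insert back-pressure and deduplication settings above can be tuned together. A sketch with illustrative numbers only:
MergeTree = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigMergeTreeArgs
{
    PartsToDelayInsert = 150,                      // start throttling inserts here
    PartsToThrowInsert = 300,                      // raise 'Too many parts' here
    ReplicatedDeduplicationWindow = 100,           // hash blocks kept in ZooKeeper
    ReplicatedDeduplicationWindowSeconds = 604800, // seven days
},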
MdbClickhouseClusterClickhouseConfigRabbitmq, MdbClickhouseClusterClickhouseConfigRabbitmqArgs            
MdbClickhouseClusterClickhouseResources, MdbClickhouseClusterClickhouseResourcesArgs          
- DiskSize int
- Volume of the storage available to a ClickHouse host, in gigabytes.
- DiskTypeId string
- Type of the storage of ClickHouse hosts. For more information see the official documentation.
- ResourcePresetId string
- DiskSize int
- Volume of the storage available to a ClickHouse host, in gigabytes.
- DiskTypeId string
- Type of the storage of ClickHouse hosts. For more information see the official documentation.
- ResourcePresetId string
- diskSize Integer
- Volume of the storage available to a ClickHouse host, in gigabytes.
- diskTypeId String
- Type of the storage of ClickHouse hosts. For more information see the official documentation.
- resourcePresetId String
- diskSize number
- Volume of the storage available to a ClickHouse host, in gigabytes.
- diskTypeId string
- Type of the storage of ClickHouse hosts. For more information see the official documentation.
- resourcePresetId string
- disk_size int
- Volume of the storage available to a ClickHouse host, in gigabytes.
- disk_type_id str
- Type of the storage of ClickHouse hosts. For more information see the official documentation.
- resource_preset_id str
- diskSize Number
- Volume of the storage available to a ClickHouse host, in gigabytes.
- diskTypeId String
- Type of the storage of ClickHouse hosts. For more information see the official documentation.
- resourcePresetId String
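A sketch of the resources block; the preset and disk type IDs below are assumed values, so check them against the official Yandex.Cloud lists:
Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
{
    ResourcePresetId = "s2.micro",  // assumed preset ID
    DiskTypeId = "network-ssd",     // assumed disk type ID
    DiskSize = 32,                  // gigabytes
},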
MdbClickhouseClusterCloudStorage, MdbClickhouseClusterCloudStorageArgs          
- Enabled bool
- Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
- Enabled bool
- Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
- enabled Boolean
- Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
- enabled boolean
- Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
- enabled bool
- Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
- enabled Boolean
- Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
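Enabling it is a one-liner on the cluster, sketched here:
CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
{
    Enabled = true, // keep ClickHouse data in Yandex Object Storage
},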
MdbClickhouseClusterDatabase, MdbClickhouseClusterDatabaseArgs        
- Name string
- The name of the database.
- Name string
- The name of the database.
- name String
- The name of the database.
- name string
- The name of the database.
- name str
- The name of the database.
- name String
- The name of the database.
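A database entry is just a named block on the cluster; the name below is hypothetical:
Databases =
{
    new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
    {
        Name = "analytics",
    },
},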
MdbClickhouseClusterFormatSchema, MdbClickhouseClusterFormatSchemaArgs          
MdbClickhouseClusterHost, MdbClickhouseClusterHostArgs        
- Type string
- The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
- Zone string
- The availability zone where the ClickHouse host will be created. For more information see the official documentation.
- AssignPublicIp bool
- Sets whether the host should get a public IP address on creation. Can be either true or false.
- Fqdn string
- The fully qualified domain name of the host.
- ShardName string
- The name of the shard to which the host belongs.
- SubnetId string
- The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
- Type string
- The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
- Zone string
- The availability zone where the ClickHouse host will be created. For more information see the official documentation.
- AssignPublicIp bool
- Sets whether the host should get a public IP address on creation. Can be either true or false.
- Fqdn string
- The fully qualified domain name of the host.
- ShardName string
- The name of the shard to which the host belongs.
- SubnetId string
- The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
- type String
- The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
- zone String
- The availability zone where the ClickHouse host will be created. For more information see the official documentation.
- assignPublicIp Boolean
- Sets whether the host should get a public IP address on creation. Can be either true or false.
- fqdn String
- The fully qualified domain name of the host.
- shardName String
- The name of the shard to which the host belongs.
- subnetId String
- The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
- type string
- The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
- zone string
- The availability zone where the ClickHouse host will be created. For more information see the official documentation.
- assignPublicIp boolean
- Sets whether the host should get a public IP address on creation. Can be either true or false.
- fqdn string
- The fully qualified domain name of the host.
- shardName string
- The name of the shard to which the host belongs.
- subnetId string
- The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
- type str
- The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
- zone str
- The availability zone where the ClickHouse host will be created. For more information see the official documentation.
- assign_public_ip bool
- Sets whether the host should get a public IP address on creation. Can be either true or false.
- fqdn str
- The fully qualified domain name of the host.
- shard_name str
- The name of the shard to which the host belongs.
- subnet_id str
- The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
- type String
- The type of the host to be deployed. Can be either CLICKHOUSE or ZOOKEEPER.
- zone String
- The availability zone where the ClickHouse host will be created. For more information see the official documentation.
- assignPublicIp Boolean
- Sets whether the host should get a public IP address on creation. Can be either true or false.
- fqdn String
- The fully qualified domain name of the host.
- shardName String
- The name of the shard to which the host belongs.
- subnetId String
- The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
MdbClickhouseClusterMaintenanceWindow, MdbClickhouseClusterMaintenanceWindowArgs          
- Type string
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour must be specified for a weekly window.
- Day string
- Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
- Hour int
- Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
- Type string
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour must be specified for a weekly window.
- Day string
- Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
- Hour int
- Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
- type String
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour must be specified for a weekly window.
- day String
- Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
- hour Integer
- Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
- type string
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour must be specified for a weekly window.
- day string
- Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
- hour number
- Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
- type str
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour must be specified for a weekly window.
- day str
- Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
- hour int
- Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
- type String
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour must be specified for a weekly window.
- day String
- Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
- hour Number
- Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
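For a weekly window, both the day and the hour must be set, as in this sketch:
MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
{
    Type = "WEEKLY",
    Day = "SAT",
    Hour = 12, // UTC
},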
MdbClickhouseClusterMlModel, MdbClickhouseClusterMlModelArgs          
MdbClickhouseClusterShardGroup, MdbClickhouseClusterShardGroupArgs          
- Name string
- The name of the shard group.
- ShardNames List<string>
- List of shard names that belong to the shard group.
- Description string
- Description of the shard group.
- Name string
- The name of the shard group.
- ShardNames []string
- List of shard names that belong to the shard group.
- Description string
- Description of the shard group.
- name String
- The name of the shard group.
- shardNames List<String>
- List of shard names that belong to the shard group.
- description String
- Description of the shard group.
- name string
- The name of the shard group.
- shardNames string[]
- List of shard names that belong to the shard group.
- description string
- Description of the shard group.
- name str
- The name of the shard group.
- shard_names Sequence[str]
- List of shard names that belong to the shard group.
- description str
- Description of the shard group.
- name String
- The name of the shard group.
- shardNames List<String>
- List of shard names that belong to the shard group.
- description String
- Description of the shard group.
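A sketch of a shard group; the group and shard names are hypothetical and must refer to shards that exist in the cluster:
ShardGroups =
{
    new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
    {
        Name = "group0",
        Description = "Primary shard group",
        ShardNames =
        {
            "shard1",
        },
    },
},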
MdbClickhouseClusterUser, MdbClickhouseClusterUserArgs        
- Name string
- The name of the user.
- Password string
- The password of the user.
- Permissions
List<MdbClickhouse Cluster User Permission>
- Set of permissions granted to the user. The structure is documented below.
- Quotas
List<MdbClickhouse Cluster User Quota>
- Set of user quotas. The structure is documented below.
- Settings
MdbClickhouse Cluster User Settings
- Custom settings for the user. The list is documented below.
- Name string
- The name of the user.
- Password string
- The password of the user.
- Permissions
[]MdbClickhouse Cluster User Permission
- Set of permissions granted to the user. The structure is documented below.
- Quotas
[]MdbClickhouse Cluster User Quota
- Set of user quotas. The structure is documented below.
- Settings
MdbClickhouse Cluster User Settings
- Custom settings for the user. The list is documented below.
- name String
- The name of the user.
- password String
- The password of the user.
- permissions
List<MdbClickhouse Cluster User Permission>
- Set of permissions granted to the user. The structure is documented below.
- quotas
List<MdbClickhouse Cluster User Quota>
- Set of user quotas. The structure is documented below.
- settings
MdbClickhouse Cluster User Settings
- Custom settings for the user. The list is documented below.
- name string
- The name of the user.
- password string
- The password of the user.
- permissions
MdbClickhouse Cluster User Permission[]
- Set of permissions granted to the user. The structure is documented below.
- quotas
MdbClickhouse Cluster User Quota[]
- Set of user quotas. The structure is documented below.
- settings
MdbClickhouse Cluster User Settings
- Custom settings for the user. The list is documented below.
- name str
- The name of the user.
- password str
- The password of the user.
- permissions
Sequence[MdbClickhouse Cluster User Permission]
- Set of permissions granted to the user. The structure is documented below.
- quotas
Sequence[MdbClickhouse Cluster User Quota]
- Set of user quotas. The structure is documented below.
- settings
MdbClickhouse Cluster User Settings
- Custom settings for the user. The list is documented below.
- name String
- The name of the user.
- password String
- The password of the user.
- permissions List<Property Map>
- Set of permissions granted to the user. The structure is documented below.
- quotas List<Property Map>
- Set of user quotas. The structure is documented below.
- settings Property Map
- Custom settings for the user. The list is documented below.
MdbClickhouseClusterUserPermission, MdbClickhouseClusterUserPermissionArgs          
- DatabaseName string
- The name of the database that the permission grants access to.
- DatabaseName string
- The name of the database that the permission grants access to.
- databaseName String
- The name of the database that the permission grants access to.
- databaseName string
- The name of the database that the permission grants access to.
- database_name str
- The name of the database that the permission grants access to.
- databaseName String
- The name of the database that the permission grants access to.
MdbClickhouseClusterUserQuota, MdbClickhouseClusterUserQuotaArgs          
- IntervalDuration int
- Duration of interval for quota in milliseconds.
- Errors int
- The number of queries that threw an exception.
- ExecutionTime int
- The total query execution time, in milliseconds (wall time).
- Queries int
- The total number of queries.
- ReadRows int
- The total number of source rows read from tables for running the query, on all remote servers.
- ResultRows int
- The total number of rows given as the result.
- IntervalDuration int
- Duration of interval for quota in milliseconds.
- Errors int
- The number of queries that threw an exception.
- ExecutionTime int
- The total query execution time, in milliseconds (wall time).
- Queries int
- The total number of queries.
- ReadRows int
- The total number of source rows read from tables for running the query, on all remote servers.
- ResultRows int
- The total number of rows given as the result.
- intervalDuration Integer
- Duration of interval for quota in milliseconds.
- errors Integer
- The number of queries that threw an exception.
- executionTime Integer
- The total query execution time, in milliseconds (wall time).
- queries Integer
- The total number of queries.
- readRows Integer
- The total number of source rows read from tables for running the query, on all remote servers.
- resultRows Integer
- The total number of rows given as the result.
- intervalDuration number
- Duration of interval for quota in milliseconds.
- errors number
- The number of queries that threw an exception.
- executionTime number
- The total query execution time, in milliseconds (wall time).
- queries number
- The total number of queries.
- readRows number
- The total number of source rows read from tables for running the query, on all remote servers.
- resultRows number
- The total number of rows given as the result.
- interval_duration int
- Duration of interval for quota in milliseconds.
- errors int
- The number of queries that threw an exception.
- execution_time int
- The total query execution time, in milliseconds (wall time).
- queries int
- The total number of queries.
- read_rows int
- The total number of source rows read from tables for running the query, on all remote servers.
- result_rows int
- The total number of rows given as the result.
- intervalDuration Number
- Duration of interval for quota in milliseconds.
- errors Number
- The number of queries that threw an exception.
- executionTime Number
- The total query execution time, in milliseconds (wall time).
- queries Number
- The total number of queries.
- readRows Number
- The total number of source rows read from tables for running the query, on all remote servers.
- resultRows Number
- The total number of rows given as the result.
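Tying the user, permission, and quota types together, a sketch with hypothetical names and illustrative limits:
Users =
{
    new Yandex.Inputs.MdbClickhouseClusterUserArgs
    {
        Name = "reporting",
        Password = "your-password",
        Permissions =
        {
            new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
            {
                DatabaseName = "analytics", // grants access to this database
            },
        },
        Quotas =
        {
            new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
            {
                IntervalDuration = 3600000, // one hour, in milliseconds
                Queries = 10000,
                Errors = 100,
            },
        },
    },
},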
MdbClickhouseClusterUserSettings, MdbClickhouseClusterUserSettingsArgs          
- AddHttpCorsHeader bool
- Include CORS headers in HTTP responses.
- AllowDdl bool
- Allows or denies DDL queries.
- Compile bool
- Enable compilation of queries.
- CompileExpressions bool
- Turn on expression compilation.
- ConnectTimeout int
- Connect timeout in milliseconds on the socket used for communicating with the client.
- CountDistinctImplementation string
- Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- DistinctOverflowMode string
- Sets behaviour on overflow when using DISTINCT. Possible values:
- DistributedAggregationMemoryEfficient bool
- Determine the behavior of distributed subqueries.
- DistributedDdlTaskTimeout int
- Timeout for DDL queries, in milliseconds.
- DistributedProductMode string
- Changes the behaviour of distributed subqueries.
- EmptyResultForAggregationByEmptySet bool
- Allows returning an empty result.
- EnableHttpCompression bool
- Enables or disables data compression in the response to an HTTP request.
- FallbackToStaleReplicasForDistributedQueries bool
- Forces a query to an out-of-date replica if updated data is not available.
- ForceIndexByDate bool
- Disables query execution if the index can’t be used by date.
- ForcePrimaryKey bool
- Disables query execution if indexing by the primary key is not possible.
- GroupByOverflowMode string
- Sets behaviour on overflow during GROUP BY operation. Possible values:
- GroupByTwoLevelThreshold int
- Sets the threshold of the number of keys, after which the two-level aggregation should be used.
- GroupByTwoLevelThresholdBytes int
- Sets the threshold of the number of bytes, after which the two-level aggregation should be used.
- HttpConnectionTimeout int
- Timeout for HTTP connection in milliseconds.
- HttpHeadersProgressInterval int
- Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- HttpReceiveTimeout int
- Timeout for HTTP connection in milliseconds.
- HttpSendTimeout int
- Timeout for HTTP connection in milliseconds.
- InputFormatDefaultsForOmittedFields bool
- When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- InputFormatValuesInterpretExpressions bool
- Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
- InsertQuorum int
- Enables the quorum writes.
- InsertQuorumTimeout int
- Write to a quorum timeout in milliseconds.
- JoinOverflowMode string
- Sets behaviour on overflow in JOIN. Possible values:
- JoinUseNulls bool
- Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- JoinedSubqueryRequiresAlias bool
- Require aliases for subselects and table functions in FROM when more than one table is present.
- LowCardinalityAllowInNativeFormat bool
- Allows or restricts using the LowCardinality data type with the Native format.
- MaxAstDepth int
- Maximum abstract syntax tree depth.
- MaxAstElements int
- Maximum abstract syntax tree elements.
- MaxBlockSize int
- A recommendation for what size of the block (in a count of rows) to load from tables.
- MaxBytesBeforeExternalGroupBy int
- Limit in bytes on memory usage for GROUP BY before using swap on disk.
- MaxBytesBeforeExternalSort int
- This setting is the equivalent of max_bytes_before_external_group_by, except that it applies to the sort operation (ORDER BY), not aggregation.
- MaxBytesInDistinct int
- Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- MaxBytesInJoin int
- Limit on maximum size of the hash table for JOIN, in bytes.
- MaxBytesInSet int
- Limit on the number of bytes in the set resulting from the execution of the IN section.
- MaxBytesToRead int
- Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- MaxBytesToSort int
- Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- MaxBytesToTransfer int
- Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- MaxColumnsToRead int
- Limits the maximum number of columns that can be read from a table in a single query.
- MaxExecutionTime int
- Limits the maximum query execution time in milliseconds.
- MaxExpandedAstElements int
- Maximum abstract syntax tree depth after expansion of aliases.
- MaxInsertBlockSize int
- The size of blocks (in a count of rows) to form for insertion into a table.
- MaxMemoryUsage int
- Limits the maximum memory usage (in bytes) for processing queries on a single server.
- MaxMemoryUsageForUser int
- Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
- MaxNetworkBandwidth int
- Limits the speed of the data exchange over the network in bytes per second.
- MaxNetworkBandwidthForUser int
- Limits the speed of the data exchange over the network in bytes per second.
- MaxQuerySize int
- The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- MaxReplicaDelayForDistributedQueries int
- Disables lagging replicas for distributed queries.
- MaxResultBytes int
- Limits the number of bytes in the result.
- MaxResultRows int
- Limits the number of rows in the result.
- MaxRowsInDistinct int
- Limits the maximum number of different rows when using DISTINCT.
- MaxRowsInJoin int
- Limit on maximum size of the hash table for JOIN, in rows.
- MaxRowsInSet int
- Limit on the number of rows in the set resulting from the execution of the IN section.
- MaxRowsToGroupBy int
- Limits the maximum number of unique keys received from aggregation function.
- MaxRowsToRead int
- Limits the maximum number of rows that can be read from a table when running a query.
- MaxRowsToSort int
- Limits the maximum number of rows that can be read from a table for sorting.
- MaxRowsToTransfer int
- Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- MaxTemporaryColumns int
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- MaxTemporaryNonConstColumns int
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- MaxThreads int
- The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- MergeTreeMaxBytesToUseCache int
- If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
- MergeTreeMaxRowsToUseCache int
- If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
- MergeTreeMinBytesForConcurrentRead int
- If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- MergeTreeMinRowsForConcurrentRead int
- If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent reading from this file on several threads.
- MinBytesToUseDirectIo int
- The minimum data volume required for using direct I/O access to the storage disk.
- MinCountToCompile int
- How many times to potentially use a compiled chunk of code before running compilation.
- MinCountToCompileExpression int
- A query waits for expression compilation process to complete prior to continuing execution.
- MinExecutionSpeed int
- Minimal execution speed in rows per second.
- MinExecutionSpeedBytes int
- Minimal execution speed in bytes per second.
- MinInsertBlockSizeBytes int
- Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- MinInsertBlockSizeRows int
- Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- OutputFormatJsonQuote64bitIntegers bool
- If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- OutputFormatJsonQuoteDenormals bool
- Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- Priority int
- Query priority.
- QuotaMode string
- Quota accounting mode.
- ReadOverflowMode string
- Sets behaviour on overflow while reading. Possible values:
- Readonly int
- Restricts permissions for reading data, writing data, and changing settings queries.
- ReceiveTimeout int
- Receive timeout in milliseconds on the socket used for communicating with the client.
- ReplicationAlterPartitionsSync int
- For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- ResultOverflowMode string
- Sets behaviour on overflow in result. Possible values:
- SelectSequentialConsistency bool
- Enables or disables sequential consistency for SELECT queries.
- SendProgressInHttpHeaders bool
- Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- SendTimeout int
- Send timeout in milliseconds on the socket used for communicating with the client.
- SetOverflowMode string
- Sets behaviour on overflow in the resulting set. Possible values:
- SkipUnavailableShards bool
- Enables or disables silently skipping of unavailable shards.
- SortOverflowMode string
- Sets behaviour on overflow while sorting. Possible values:
- TimeoutOverflowMode string
- Sets behaviour on overflow. Possible values:
- TransferOverflowMode string
- Sets behaviour on overflow. Possible values:
- TransformNullIn bool
- Enables equality of NULL values for IN operator.
- UseUncompressedCache bool
- Whether to use a cache of uncompressed blocks.
- AddHttpCorsHeader bool
- Include CORS headers in HTTP responses.
- AllowDdl bool
- Allows or denies DDL queries.
- Compile bool
- Enable compilation of queries.
- CompileExpressions bool
- Turn on expression compilation.
- ConnectTimeout int
- Connect timeout in milliseconds on the socket used for communicating with the client.
- CountDistinctImplementation string
- Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- DistinctOverflowMode string
- Sets behaviour on overflow when using DISTINCT. Possible values:
- DistributedAggregationMemoryEfficient bool
- Determines the behavior of distributed subqueries.
- DistributedDdlTaskTimeout int
- Timeout for DDL queries, in milliseconds.
- DistributedProductMode string
- Changes the behaviour of distributed subqueries.
- EmptyResultForAggregationByEmptySet bool
- Allows returning an empty result.
- EnableHttpCompression bool
- Enables or disables data compression in the response to an HTTP request.
- FallbackToStaleReplicasForDistributedQueries bool
- Forces a query to an out-of-date replica if updated data is not available.
- ForceIndexByDate bool
- Disables query execution if the index can’t be used by date.
- ForcePrimaryKey bool
- Disables query execution if indexing by the primary key is not possible.
- GroupByOverflowMode string
- Sets behaviour on overflow during a GROUP BY operation. Possible values:
- GroupByTwoLevelThreshold int
- Sets the threshold on the number of keys, after which two-level aggregation should be used.
- GroupByTwoLevelThresholdBytes int
- Sets the threshold on the number of bytes, after which two-level aggregation should be used.
- HttpConnectionTimeout int
- Timeout for HTTP connection in milliseconds.
- HttpHeadersProgressInterval int
- Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- HttpReceiveTimeout int
- Timeout for HTTP receive in milliseconds.
- HttpSendTimeout int
- Timeout for HTTP send in milliseconds.
- InputFormatDefaultsForOmittedFields bool
- When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- InputFormatValuesInterpretExpressions bool
- Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
- InsertQuorum int
- Enables quorum writes.
- InsertQuorumTimeout int
- Quorum write timeout in milliseconds.
- JoinOverflowMode string
- Sets behaviour on overflow in JOIN. Possible values:
- JoinUseNulls bool
- Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- JoinedSubqueryRequiresAlias bool
- Requires aliases for subselects and table functions in FROM when more than one table is present.
- LowCardinalityAllowInNativeFormat bool
- Allows or restricts using the LowCardinality data type with the Native format.
- MaxAstDepth int
- Maximum abstract syntax tree depth.
- MaxAstElements int
- Maximum abstract syntax tree elements.
- MaxBlockSize int
- A recommendation for what size of the block (in a count of rows) to load from tables.
- MaxBytesBeforeExternalGroupBy int
- Limit in bytes for using memory for GROUP BY before using swap on disk.
- MaxBytesBeforeExternalSort int
- This setting is equivalent to max_bytes_before_external_group_by, except that it applies to the sort operation (ORDER BY), not aggregation.
- MaxBytesInDistinct int
- Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- MaxBytesInJoin int
- Limit on maximum size of the hash table for JOIN, in bytes.
- MaxBytesInSet int
- Limit on the number of bytes in the set resulting from the execution of the IN section.
- MaxBytesToRead int
- Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- MaxBytesToSort int
- Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- MaxBytesToTransfer int
- Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- MaxColumnsToRead int
- Limits the maximum number of columns that can be read from a table in a single query.
- MaxExecutionTime int
- Limits the maximum query execution time in milliseconds.
- MaxExpandedAstElements int
- Maximum abstract syntax tree depth after expansion of aliases.
- MaxInsertBlockSize int
- The size of blocks (in a count of rows) to form for insertion into a table.
- MaxMemoryUsage int
- Limits the maximum memory usage (in bytes) for processing queries on a single server.
- MaxMemoryUsageForUser int
- Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- MaxNetworkBandwidth int
- Limits the speed of the data exchange over the network in bytes per second.
- MaxNetworkBandwidthForUser int
- Limits the speed of the data exchange over the network in bytes per second.
- MaxQuerySize int
- The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- MaxReplicaDelayForDistributedQueries int
- Disables lagging replicas for distributed queries.
- MaxResultBytes int
- Limits the number of bytes in the result.
- MaxResultRows int
- Limits the number of rows in the result.
- MaxRowsInDistinct int
- Limits the maximum number of different rows when using DISTINCT.
- MaxRowsInJoin int
- Limit on maximum size of the hash table for JOIN, in rows.
- MaxRowsInSet int
- Limit on the number of rows in the set resulting from the execution of the IN section.
- MaxRowsToGroupBy int
- Limits the maximum number of unique keys received from an aggregation function.
- MaxRowsToRead int
- Limits the maximum number of rows that can be read from a table when running a query.
- MaxRowsToSort int
- Limits the maximum number of rows that can be read from a table for sorting.
- MaxRowsToTransfer int
- Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- MaxTemporaryColumns int
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- MaxTemporaryNonConstColumns int
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- MaxThreads int
- The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- MergeTreeMaxBytesToUseCache int
- If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
- MergeTreeMaxRowsToUseCache int
- If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
- MergeTreeMinBytesForConcurrentRead int
- If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- MergeTreeMinRowsForConcurrentRead int
- If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent reading from this file on several threads.
- MinBytesToUseDirectIo int
- The minimum data volume required for using direct I/O access to the storage disk.
- MinCountToCompile int
- How many times to potentially use a compiled chunk of code before running compilation.
- MinCountToCompileExpression int
- A query waits for the expression compilation process to complete prior to continuing execution.
- MinExecutionSpeed int
- Minimal execution speed in rows per second.
- MinExecutionSpeedBytes int
- Minimal execution speed in bytes per second.
- MinInsertBlockSizeBytes int
- Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- MinInsertBlockSizeRows int
- Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- OutputFormatJsonQuote64bitIntegers bool
- If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- OutputFormatJsonQuoteDenormals bool
- Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- Priority int
- Query priority.
- QuotaMode string
- Quota accounting mode.
- ReadOverflowMode string
- Sets behaviour on overflow while reading. Possible values:
- Readonly int
- Restricts permissions for reading data, writing data, and changing settings queries.
- ReceiveTimeout int
- Receive timeout in milliseconds on the socket used for communicating with the client.
- ReplicationAlterPartitionsSync int
- For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- ResultOverflowMode string
- Sets behaviour on overflow in the result. Possible values:
- SelectSequentialConsistency bool
- Enables or disables sequential consistency for SELECT queries.
- SendProgressInHttpHeaders bool
- Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- SendTimeout int
- Send timeout in milliseconds on the socket used for communicating with the client.
- SetOverflowMode string
- Sets behaviour on overflow in the resulting set. Possible values:
- SkipUnavailableShards bool
- Enables or disables silently skipping of unavailable shards.
- SortOverflowMode string
- Sets behaviour on overflow while sorting. Possible values:
- TimeoutOverflowMode string
- Sets behaviour on overflow. Possible values:
- TransferOverflowMode string
- Sets behaviour on overflow. Possible values:
- TransformNullIn bool
- Enables equality of NULL values for the IN operator.
- UseUncompressedCache bool
- Whether to use a cache of uncompressed blocks.
- addHttpCorsHeader Boolean
- Include CORS headers in HTTP responses.
- allowDdl Boolean
- Allows or denies DDL queries.
- compile Boolean
- Enable compilation of queries.
- compileExpressions Boolean
- Turn on expression compilation.
- connectTimeout Integer
- Connect timeout in milliseconds on the socket used for communicating with the client.
- countDistinctImplementation String
- Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- distinctOverflowMode String
- Sets behaviour on overflow when using DISTINCT. Possible values:
- distributedAggregationMemoryEfficient Boolean
- Determines the behavior of distributed subqueries.
- distributedDdlTaskTimeout Integer
- Timeout for DDL queries, in milliseconds.
- distributedProductMode String
- Changes the behaviour of distributed subqueries.
- emptyResultForAggregationByEmptySet Boolean
- Allows returning an empty result.
- enableHttpCompression Boolean
- Enables or disables data compression in the response to an HTTP request.
- fallbackToStaleReplicasForDistributedQueries Boolean
- Forces a query to an out-of-date replica if updated data is not available.
- forceIndexByDate Boolean
- Disables query execution if the index can’t be used by date.
- forcePrimaryKey Boolean
- Disables query execution if indexing by the primary key is not possible.
- groupByOverflowMode String
- Sets behaviour on overflow during a GROUP BY operation. Possible values:
- groupByTwoLevelThreshold Integer
- Sets the threshold on the number of keys, after which two-level aggregation should be used.
- groupByTwoLevelThresholdBytes Integer
- Sets the threshold on the number of bytes, after which two-level aggregation should be used.
- httpConnectionTimeout Integer
- Timeout for HTTP connection in milliseconds.
- httpHeadersProgressInterval Integer
- Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- httpReceiveTimeout Integer
- Timeout for HTTP receive in milliseconds.
- httpSendTimeout Integer
- Timeout for HTTP send in milliseconds.
- inputFormatDefaultsForOmittedFields Boolean
- When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- inputFormatValuesInterpretExpressions Boolean
- Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
- insertQuorum Integer
- Enables quorum writes.
- insertQuorumTimeout Integer
- Quorum write timeout in milliseconds.
- joinOverflowMode String
- Sets behaviour on overflow in JOIN. Possible values:
- joinUseNulls Boolean
- Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- joinedSubqueryRequiresAlias Boolean
- Requires aliases for subselects and table functions in FROM when more than one table is present.
- lowCardinalityAllowInNativeFormat Boolean
- Allows or restricts using the LowCardinality data type with the Native format.
- maxAstDepth Integer
- Maximum abstract syntax tree depth.
- maxAstElements Integer
- Maximum abstract syntax tree elements.
- maxBlockSize Integer
- A recommendation for what size of the block (in a count of rows) to load from tables.
- maxBytesBeforeExternalGroupBy Integer
- Limit in bytes for using memory for GROUP BY before using swap on disk.
- maxBytesBeforeExternalSort Integer
- This setting is equivalent to max_bytes_before_external_group_by, except that it applies to the sort operation (ORDER BY), not aggregation.
- maxBytesInDistinct Integer
- Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- maxBytesInJoin Integer
- Limit on maximum size of the hash table for JOIN, in bytes.
- maxBytesInSet Integer
- Limit on the number of bytes in the set resulting from the execution of the IN section.
- maxBytesToRead Integer
- Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- maxBytesToSort Integer
- Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- maxBytesToTransfer Integer
- Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxColumnsToRead Integer
- Limits the maximum number of columns that can be read from a table in a single query.
- maxExecutionTime Integer
- Limits the maximum query execution time in milliseconds.
- maxExpandedAstElements Integer
- Maximum abstract syntax tree depth after expansion of aliases.
- maxInsertBlockSize Integer
- The size of blocks (in a count of rows) to form for insertion into a table.
- maxMemoryUsage Integer
- Limits the maximum memory usage (in bytes) for processing queries on a single server.
- maxMemoryUsageForUser Integer
- Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- maxNetworkBandwidth Integer
- Limits the speed of the data exchange over the network in bytes per second.
- maxNetworkBandwidthForUser Integer
- Limits the speed of the data exchange over the network in bytes per second.
- maxQuerySize Integer
- The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- maxReplicaDelayForDistributedQueries Integer
- Disables lagging replicas for distributed queries.
- maxResultBytes Integer
- Limits the number of bytes in the result.
- maxResultRows Integer
- Limits the number of rows in the result.
- maxRowsInDistinct Integer
- Limits the maximum number of different rows when using DISTINCT.
- maxRowsInJoin Integer
- Limit on maximum size of the hash table for JOIN, in rows.
- maxRowsInSet Integer
- Limit on the number of rows in the set resulting from the execution of the IN section.
- maxRowsToGroupBy Integer
- Limits the maximum number of unique keys received from an aggregation function.
- maxRowsToRead Integer
- Limits the maximum number of rows that can be read from a table when running a query.
- maxRowsToSort Integer
- Limits the maximum number of rows that can be read from a table for sorting.
- maxRowsToTransfer Integer
- Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxTemporaryColumns Integer
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- maxTemporaryNonConstColumns Integer
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- maxThreads Integer
- The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- mergeTreeMaxBytesToUseCache Integer
- If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
- mergeTreeMaxRowsToUseCache Integer
- If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
- mergeTreeMinBytesForConcurrentRead Integer
- If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- mergeTreeMinRowsForConcurrentRead Integer
- If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent reading from this file on several threads.
- minBytesToUseDirectIo Integer
- The minimum data volume required for using direct I/O access to the storage disk.
- minCountToCompile Integer
- How many times to potentially use a compiled chunk of code before running compilation.
- minCountToCompileExpression Integer
- A query waits for the expression compilation process to complete prior to continuing execution.
- minExecutionSpeed Integer
- Minimal execution speed in rows per second.
- minExecutionSpeedBytes Integer
- Minimal execution speed in bytes per second.
- minInsertBlockSizeBytes Integer
- Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- minInsertBlockSizeRows Integer
- Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- outputFormatJsonQuote64bitIntegers Boolean
- If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- outputFormatJsonQuoteDenormals Boolean
- Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- priority Integer
- Query priority.
- quotaMode String
- Quota accounting mode.
- readOverflowMode String
- Sets behaviour on overflow while reading. Possible values:
- readonly Integer
- Restricts permissions for reading data, writing data, and changing settings queries.
- receiveTimeout Integer
- Receive timeout in milliseconds on the socket used for communicating with the client.
- replicationAlterPartitionsSync Integer
- For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- resultOverflowMode String
- Sets behaviour on overflow in the result. Possible values:
- selectSequentialConsistency Boolean
- Enables or disables sequential consistency for SELECT queries.
- sendProgressInHttpHeaders Boolean
- Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- sendTimeout Integer
- Send timeout in milliseconds on the socket used for communicating with the client.
- setOverflowMode String
- Sets behaviour on overflow in the resulting set. Possible values:
- skipUnavailableShards Boolean
- Enables or disables silently skipping of unavailable shards.
- sortOverflowMode String
- Sets behaviour on overflow while sorting. Possible values:
- timeoutOverflowMode String
- Sets behaviour on overflow. Possible values:
- transferOverflowMode String
- Sets behaviour on overflow. Possible values:
- transformNullIn Boolean
- Enables equality of NULL values for the IN operator.
- useUncompressedCache Boolean
- Whether to use a cache of uncompressed blocks.
- addHttpCorsHeader boolean
- Include CORS headers in HTTP responses.
- allowDdl boolean
- Allows or denies DDL queries.
- compile boolean
- Enable compilation of queries.
- compileExpressions boolean
- Turn on expression compilation.
- connectTimeout number
- Connect timeout in milliseconds on the socket used for communicating with the client.
- countDistinctImplementation string
- Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- distinctOverflowMode string
- Sets behaviour on overflow when using DISTINCT. Possible values:
- distributedAggregationMemoryEfficient boolean
- Determines the behavior of distributed subqueries.
- distributedDdlTaskTimeout number
- Timeout for DDL queries, in milliseconds.
- distributedProductMode string
- Changes the behaviour of distributed subqueries.
- emptyResultForAggregationByEmptySet boolean
- Allows returning an empty result.
- enableHttpCompression boolean
- Enables or disables data compression in the response to an HTTP request.
- fallbackToStaleReplicasForDistributedQueries boolean
- Forces a query to an out-of-date replica if updated data is not available.
- forceIndexByDate boolean
- Disables query execution if the index can’t be used by date.
- forcePrimaryKey boolean
- Disables query execution if indexing by the primary key is not possible.
- groupByOverflowMode string
- Sets behaviour on overflow during a GROUP BY operation. Possible values:
- groupByTwoLevelThreshold number
- Sets the threshold on the number of keys, after which two-level aggregation should be used.
- groupByTwoLevelThresholdBytes number
- Sets the threshold on the number of bytes, after which two-level aggregation should be used.
- httpConnectionTimeout number
- Timeout for HTTP connection in milliseconds.
- httpHeadersProgressInterval number
- Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- httpReceiveTimeout number
- Timeout for HTTP receive in milliseconds.
- httpSendTimeout number
- Timeout for HTTP send in milliseconds.
- inputFormatDefaultsForOmittedFields boolean
- When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- inputFormatValuesInterpretExpressions boolean
- Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
- insertQuorum number
- Enables quorum writes.
- insertQuorumTimeout number
- Quorum write timeout in milliseconds.
- joinOverflowMode string
- Sets behaviour on overflow in JOIN. Possible values:
- joinUseNulls boolean
- Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- joinedSubqueryRequiresAlias boolean
- Requires aliases for subselects and table functions in FROM when more than one table is present.
- lowCardinalityAllowInNativeFormat boolean
- Allows or restricts using the LowCardinality data type with the Native format.
- maxAstDepth number
- Maximum abstract syntax tree depth.
- maxAstElements number
- Maximum abstract syntax tree elements.
- maxBlockSize number
- A recommendation for what size of the block (in a count of rows) to load from tables.
- maxBytesBeforeExternalGroupBy number
- Limit in bytes for using memory for GROUP BY before using swap on disk.
- maxBytesBeforeExternalSort number
- This setting is equivalent to max_bytes_before_external_group_by, except that it applies to the sort operation (ORDER BY), not aggregation.
- maxBytesInDistinct number
- Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- maxBytesInJoin number
- Limit on maximum size of the hash table for JOIN, in bytes.
- maxBytesInSet number
- Limit on the number of bytes in the set resulting from the execution of the IN section.
- maxBytesToRead number
- Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- maxBytesToSort number
- Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- maxBytesToTransfer number
- Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxColumnsToRead number
- Limits the maximum number of columns that can be read from a table in a single query.
- maxExecutionTime number
- Limits the maximum query execution time in milliseconds.
- maxExpandedAstElements number
- Maximum abstract syntax tree depth after expansion of aliases.
- maxInsertBlockSize number
- The size of blocks (in a count of rows) to form for insertion into a table.
- maxMemoryUsage number
- Limits the maximum memory usage (in bytes) for processing queries on a single server.
- maxMemoryUsageForUser number
- Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- maxNetworkBandwidth number
- Limits the speed of the data exchange over the network in bytes per second.
- maxNetworkBandwidthForUser number
- Limits the speed of the data exchange over the network in bytes per second.
- maxQuerySize number
- The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- maxReplicaDelayForDistributedQueries number
- Disables lagging replicas for distributed queries.
- maxResultBytes number
- Limits the number of bytes in the result.
- maxResultRows number
- Limits the number of rows in the result.
- maxRowsInDistinct number
- Limits the maximum number of different rows when using DISTINCT.
- maxRowsInJoin number
- Limit on maximum size of the hash table for JOIN, in rows.
- maxRowsInSet number
- Limit on the number of rows in the set resulting from the execution of the IN section.
- maxRowsToGroupBy number
- Limits the maximum number of unique keys received from an aggregation function.
- maxRowsToRead number
- Limits the maximum number of rows that can be read from a table when running a query.
- maxRowsToSort number
- Limits the maximum number of rows that can be read from a table for sorting.
- maxRowsToTransfer number
- Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxTemporaryColumns number
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- maxTemporaryNonConstColumns number
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- maxThreads number
- The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- mergeTreeMaxBytesToUseCache number
- If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
- mergeTreeMaxRowsToUseCache number
- If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
- mergeTreeMinBytesForConcurrentRead number
- If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- mergeTreeMinRowsForConcurrentRead number
- If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent reading from this file on several threads.
- minBytesToUseDirectIo number
- The minimum data volume required for using direct I/O access to the storage disk.
- minCountToCompile number
- How many times to potentially use a compiled chunk of code before running compilation.
- minCountToCompileExpression number
- A query waits for the expression compilation process to complete prior to continuing execution.
- minExecutionSpeed number
- Minimal execution speed in rows per second.
- minExecutionSpeedBytes number
- Minimal execution speed in bytes per second.
- minInsertBlockSizeBytes number
- Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- minInsertBlockSizeRows number
- Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- outputFormatJsonQuote64bitIntegers boolean
- If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- outputFormatJsonQuoteDenormals boolean
- Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- priority number
- Query priority.
- quotaMode string
- Quota accounting mode.
- readOverflowMode string
- Sets behaviour on overflow while reading. Possible values:
- readonly number
- Restricts permissions for reading data, writing data, and changing settings queries.
- receiveTimeout number
- Receive timeout in milliseconds on the socket used for communicating with the client.
- replicationAlterPartitionsSync number
- For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- resultOverflowMode string
- Sets behaviour on overflow in the result. Possible values:
- selectSequentialConsistency boolean
- Enables or disables sequential consistency for SELECT queries.
- sendProgressInHttpHeaders boolean
- Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- sendTimeout number
- Send timeout in milliseconds on the socket used for communicating with the client.
- setOverflowMode string
- Sets behaviour on overflow in the resulting set. Possible values:
- skipUnavailableShards boolean
- Enables or disables silently skipping of unavailable shards.
- sortOverflowMode string
- Sets behaviour on overflow while sorting. Possible values:
- timeoutOverflowMode string
- Sets behaviour on overflow. Possible values:
- transferOverflowMode string
- Sets behaviour on overflow. Possible values:
- transformNullIn boolean
- Enables equality of NULL values for the IN operator.
- useUncompressedCache boolean
- Whether to use a cache of uncompressed blocks.
- add_http_cors_header bool
- Include CORS headers in HTTP responses.
- allow_ddl bool
- Allows or denies DDL queries.
- compile bool
- Enable compilation of queries.
- compile_expressions bool
- Turn on expression compilation.
- connect_timeout int
- Connect timeout in milliseconds on the socket used for communicating with the client.
- count_distinct_implementation str
- Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- distinct_overflow_mode str
- Sets behaviour on overflow when using DISTINCT. Possible values:
- distributed_aggregation_memory_efficient bool
- Determines the behavior of distributed subqueries.
- distributed_ddl_task_timeout int
- Timeout for DDL queries, in milliseconds.
- distributed_product_mode str
- Changes the behaviour of distributed subqueries.
- empty_result_for_aggregation_by_empty_set bool
- Allows returning an empty result.
- enable_http_compression bool
- Enables or disables data compression in the response to an HTTP request.
- fallback_to_stale_replicas_for_distributed_queries bool
- Forces a query to an out-of-date replica if updated data is not available.
- force_index_by_date bool
- Disables query execution if the index can’t be used by date.
- force_primary_key bool
- Disables query execution if indexing by the primary key is not possible.
- group_by_overflow_mode str
- Sets behaviour on overflow during a GROUP BY operation. Possible values:
- group_by_two_level_threshold int
- Sets the threshold on the number of keys, after which two-level aggregation should be used.
- group_by_two_level_threshold_bytes int
- Sets the threshold on the number of bytes, after which two-level aggregation should be used.
- http_connection_timeout int
- Timeout for HTTP connection in milliseconds.
- http_headers_progress_interval int
- Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- http_receive_timeout int
- Timeout for HTTP receive in milliseconds.
- http_send_timeout int
- Timeout for HTTP send in milliseconds.
- input_format_defaults_for_omitted_fields bool
- When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- input_format_values_interpret_expressions bool
- Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
- insert_quorum int
- Enables quorum writes.
- insert_quorum_timeout int
- Quorum write timeout in milliseconds.
- join_overflow_mode str
- Sets behaviour on overflow in JOIN. Possible values:
- join_use_nulls bool
- Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- joined_subquery_requires_alias bool
- Requires aliases for subselects and table functions in FROM when more than one table is present.
- low_cardinality_allow_in_native_format bool
- Allows or restricts using the LowCardinality data type with the Native format.
- max_ast_depth int
- Maximum abstract syntax tree depth.
- max_ast_elements int
- Maximum abstract syntax tree elements.
- max_block_size int
- A recommendation for what size of the block (in a count of rows) to load from tables.
- max_bytes_before_external_group_by int
- Limit in bytes for using memory for GROUP BY before using swap on disk.
- max_bytes_before_external_sort int
- This setting is equivalent to max_bytes_before_external_group_by, except that it applies to the sort operation (ORDER BY), not aggregation.
- max_bytes_in_distinct int
- Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- max_bytes_in_join int
- Limit on maximum size of the hash table for JOIN, in bytes.
- max_bytes_in_set int
- Limit on the number of bytes in the set resulting from the execution of the IN section.
- max_bytes_to_read int
- Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- max_bytes_to_sort int
- Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- max_bytes_to_transfer int
- Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- max_columns_to_read int
- Limits the maximum number of columns that can be read from a table in a single query.
- max_execution_time int
- Limits the maximum query execution time in milliseconds.
- max_expanded_ast_elements int
- Maximum abstract syntax tree depth after expansion of aliases.
- max_insert_block_size int
- The size of blocks (in a count of rows) to form for insertion into a table.
- max_memory_usage int
- Limits the maximum memory usage (in bytes) for processing queries on a single server.
- max_memory_usage_for_user int
- Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- max_network_bandwidth int
- Limits the speed of the data exchange over the network in bytes per second.
- max_network_bandwidth_for_user int
- Limits the speed of the data exchange over the network in bytes per second.
- max_query_size int
- The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- max_replica_delay_for_distributed_queries int
- Disables lagging replicas for distributed queries.
- max_result_bytes int
- Limits the number of bytes in the result.
- max_result_rows int
- Limits the number of rows in the result.
- max_rows_in_distinct int
- Limits the maximum number of different rows when using DISTINCT.
- max_rows_in_join int
- Limit on maximum size of the hash table for JOIN, in rows.
- max_rows_in_set int
- Limit on the number of rows in the set resulting from the execution of the IN section.
- max_rows_to_group_by int
- Limits the maximum number of unique keys received from an aggregation function.
- max_rows_to_read int
- Limits the maximum number of rows that can be read from a table when running a query.
- max_rows_to_sort int
- Limits the maximum number of rows that can be read from a table for sorting.
- max_rows_to_transfer int
- Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- max_temporary_columns int
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- max_temporary_non_const_columns int
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- max_threads int
- The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- merge_tree_max_bytes_to_use_cache int
- If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
- merge_tree_max_rows_to_use_cache int
- If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
- merge_tree_min_bytes_for_concurrent_read int
- If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- merge_tree_min_rows_for_concurrent_read int
- If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent reading from this file on several threads.
- min_bytes_to_use_direct_io int
- The minimum data volume required for using direct I/O access to the storage disk.
- min_count_to_compile int
- How many times to potentially use a compiled chunk of code before running compilation.
- min_count_to_compile_expression int
- A query waits for the expression compilation process to complete prior to continuing execution.
- min_execution_speed int
- Minimal execution speed in rows per second.
- min_execution_speed_bytes int
- Minimal execution speed in bytes per second.
- min_insert_block_size_bytes int
- Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- min_insert_block_size_rows int
- Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- output_format_json_quote64bit_integers bool
- If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- output_format_json_quote_denormals bool
- Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- priority int
- Query priority.
- quota_mode str
- Quota accounting mode.
- read_overflow_mode str
- Sets behaviour on overflow while reading. Possible values:
- readonly int
- Restricts permissions for reading data, writing data, and changing settings queries.
- receive_timeout int
- Receive timeout in milliseconds on the socket used for communicating with the client.
- replication_alter_partitions_sync int
- For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- result_overflow_mode str
- Sets behaviour on overflow in the result. Possible values:
- select_sequential_consistency bool
- Enables or disables sequential consistency for SELECT queries.
- send_progress_in_http_headers bool
- Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- send_timeout int
- Send timeout in milliseconds on the socket used for communicating with the client.
- set_overflow_mode str
- Sets behaviour on overflow in the resulting set. Possible values:
- skip_unavailable_shards bool
- Enables or disables silently skipping of unavailable shards.
- sort_overflow_mode str
- Sets behaviour on overflow while sorting. Possible values:
- timeout_overflow_mode str
- Sets behaviour on overflow. Possible values:
- transfer_overflow_mode str
- Sets behaviour on overflow. Possible values:
- transform_null_in bool
- Enables equality of NULL values for the IN operator.
- use_uncompressed_cache bool
- Whether to use a cache of uncompressed blocks.
- addHttpCorsHeader Boolean
- Include CORS headers in HTTP responses.
- allowDdl Boolean
- Allows or denies DDL queries.
- compile Boolean
- Enable compilation of queries.
- compileExpressions Boolean
- Turn on expression compilation.
- connectTimeout Number
- Connect timeout in milliseconds on the socket used for communicating with the client.
- countDistinctImplementation String
- Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
- distinctOverflowMode String
- Sets behaviour on overflow when using DISTINCT. Possible values:
- distributedAggregationMemoryEfficient Boolean
- Determines the behavior of distributed subqueries.
- distributedDdlTaskTimeout Number
- Timeout for DDL queries, in milliseconds.
- distributedProductMode String
- Changes the behaviour of distributed subqueries.
- emptyResultForAggregationByEmptySet Boolean
- Allows returning an empty result.
- enableHttpCompression Boolean
- Enables or disables data compression in the response to an HTTP request.
- fallbackToStaleReplicasForDistributedQueries Boolean
- Forces a query to an out-of-date replica if updated data is not available.
- forceIndexByDate Boolean
- Disables query execution if the index can’t be used by date.
- forcePrimaryKey Boolean
- Disables query execution if indexing by the primary key is not possible.
- groupByOverflowMode String
- Sets behaviour on overflow during a GROUP BY operation. Possible values:
- groupByTwoLevelThreshold Number
- Sets the threshold on the number of keys, after which two-level aggregation should be used.
- groupByTwoLevelThresholdBytes Number
- Sets the threshold on the number of bytes, after which two-level aggregation should be used.
- httpConnectionTimeout Number
- Timeout for HTTP connection in milliseconds.
- httpHeadersProgressInterval Number
- Sets the minimal interval between notifications about request progress in the X-ClickHouse-Progress HTTP header.
- httpReceiveTimeout Number
- Timeout for HTTP receive in milliseconds.
- httpSendTimeout Number
- Timeout for HTTP send in milliseconds.
- inputFormatDefaultsForOmittedFields Boolean
- When performing INSERT queries, replace omitted input column values with default values of the respective columns.
- inputFormatValuesInterpretExpressions Boolean
- Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
- insertQuorum Number
- Enables quorum writes.
- insertQuorumTimeout Number
- Quorum write timeout in milliseconds.
- joinOverflowMode String
- Sets behaviour on overflow in JOIN. Possible values:
- joinUseNulls Boolean
- Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
- joinedSubqueryRequiresAlias Boolean
- Requires aliases for subselects and table functions in FROM when more than one table is present.
- lowCardinalityAllowInNativeFormat Boolean
- Allows or restricts using the LowCardinality data type with the Native format.
- maxAstDepth Number
- Maximum abstract syntax tree depth.
- maxAstElements Number
- Maximum abstract syntax tree elements.
- maxBlockSize Number
- A recommendation for what size of the block (in a count of rows) to load from tables.
- maxBytesBeforeExternalGroupBy Number
- Limit in bytes for using memory for GROUP BY before using swap on disk.
- maxBytesBeforeExternalSort Number
- This setting is equivalent to max_bytes_before_external_group_by, except that it applies to the sort operation (ORDER BY), not aggregation.
- maxBytesInDistinct Number
- Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
- maxBytesInJoin Number
- Limit on maximum size of the hash table for JOIN, in bytes.
- maxBytesInSet Number
- Limit on the number of bytes in the set resulting from the execution of the IN section.
- maxBytesToRead Number
- Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
- maxBytesToSort Number
- Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
- maxBytesToTransfer Number
- Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxColumnsToRead Number
- Limits the maximum number of columns that can be read from a table in a single query.
- maxExecutionTime Number
- Limits the maximum query execution time in milliseconds.
- maxExpandedAstElements Number
- Maximum abstract syntax tree depth after expansion of aliases.
- maxInsertBlockSize Number
- The size of blocks (in a count of rows) to form for insertion into a table.
- maxMemoryUsage Number
- Limits the maximum memory usage (in bytes) for processing queries on a single server.
- maxMemoryUsageForUser Number
- Limits the maximum memory usage (in bytes) for processing a user's queries on a single server.
- maxNetworkBandwidth Number
- Limits the speed of the data exchange over the network in bytes per second.
- maxNetworkBandwidthForUser Number
- Limits the speed of the data exchange over the network in bytes per second.
- maxQuerySize Number
- The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
- maxReplicaDelayForDistributedQueries Number
- Disables lagging replicas for distributed queries.
- maxResultBytes Number
- Limits the number of bytes in the result.
- maxResultRows Number
- Limits the number of rows in the result.
- maxRowsInDistinct Number
- Limits the maximum number of different rows when using DISTINCT.
- maxRowsInJoin Number
- Limit on maximum size of the hash table for JOIN, in rows.
- maxRowsInSet Number
- Limit on the number of rows in the set resulting from the execution of the IN section.
- maxRowsToGroupBy Number
- Limits the maximum number of unique keys received from an aggregation function.
- maxRowsToRead Number
- Limits the maximum number of rows that can be read from a table when running a query.
- maxRowsToSort Number
- Limits the maximum number of rows that can be read from a table for sorting.
- maxRowsToTransfer Number
- Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
- maxTemporaryColumns Number
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
- maxTemporaryNonConstColumns Number
- Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
- maxThreads Number
- The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
- mergeTreeMaxBytesToUseCache Number
- If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
- mergeTreeMaxRowsToUseCache Number
- If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
- mergeTreeMinBytesForConcurrentRead Number
- If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
- mergeTreeMinRowsForConcurrentRead Number
- If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read, then ClickHouse tries to perform a concurrent reading from this file on several threads.
- minBytesToUseDirectIo Number
- The minimum data volume required for using direct I/O access to the storage disk.
- minCountToCompile Number
- How many times to potentially use a compiled chunk of code before running compilation.
- minCountToCompileExpression Number
- A query waits for the expression compilation process to complete prior to continuing execution.
- minExecutionSpeed Number
- Minimal execution speed in rows per second.
- minExecutionSpeedBytes Number
- Minimal execution speed in bytes per second.
- minInsertBlockSizeBytes Number
- Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
- minInsertBlockSizeRows Number
- Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
- outputFormatJsonQuote64bitIntegers Boolean
- If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
- outputFormatJsonQuoteDenormals Boolean
- Enables +nan, -nan, +inf, -inf outputs in JSON output format.
- priority Number
- Query priority.
- quotaMode String
- Quota accounting mode.
- readOverflowMode String
- Sets behaviour on overflow while reading. Possible values:
- readonly Number
- Restricts permissions for reading data, writing data, and changing settings queries.
- receiveTimeout Number
- Receive timeout in milliseconds on the socket used for communicating with the client.
- replicationAlterPartitionsSync Number
- For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
- resultOverflowMode String
- Sets behaviour on overflow in the result. Possible values:
- selectSequentialConsistency Boolean
- Enables or disables sequential consistency for SELECT queries.
- sendProgressInHttpHeaders Boolean
- Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
- sendTimeout Number
- Send timeout in milliseconds on the socket used for communicating with the client.
- setOverflowMode String
- Sets behaviour on overflow in the resulting set. Possible values:
- skipUnavailableShards Boolean
- Enables or disables silently skipping of unavailable shards.
- sortOverflowMode String
- Sets behaviour on overflow while sorting. Possible values:
- timeoutOverflowMode String
- Sets behaviour on overflow. Possible values:
- transferOverflowMode String
- Sets behaviour on overflow. Possible values:
- transformNullIn Boolean
- Enables equality of NULL values for the IN operator.
- useUncompressedCache Boolean
- Whether to use a cache of uncompressed blocks.
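For orientation, a minimal C# sketch of how a few of the settings above might be tuned. It assumes these settings belong to a user's Settings block, following this resource's Yandex.Inputs.MdbClickhouseClusterUser*Args naming pattern; the user name, password, and all values are illustrative placeholders, not recommendations.
using Pulumi;
using Yandex = Pulumi.Yandex;

class SettingsExampleStack : Stack
{
    public SettingsExampleStack()
    {
        // Hypothetical user entry tuning a few of the settings listed above;
        // type and value choices are assumptions for illustration only.
        var reportingUser = new Yandex.Inputs.MdbClickhouseClusterUserArgs
        {
            Name = "reporting",
            Password = "change-me",
            Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
            {
                ReadOverflowMode = "throw",   // abort queries that read too much
                MaxMemoryUsage = 10000000000, // per-query memory cap, in bytes
                MaxExecutionTime = 600000,    // per-query time cap, in milliseconds
                JoinUseNulls = true,          // fill empty JOIN cells with NULL
                AddHttpCorsHeader = true,     // emit CORS headers on HTTP responses
            },
        };
        // reportingUser would then be added to the cluster's Users input list.
    }
}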
MdbClickhouseClusterZookeeper, MdbClickhouseClusterZookeeperArgs
- Resources MdbClickhouseClusterZookeeperResources
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- Resources MdbClickhouseClusterZookeeperResources
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbClickhouseClusterZookeeperResources
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbClickhouseClusterZookeeperResources
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbClickhouseClusterZookeeperResources
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources Property Map
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
MdbClickhouseClusterZookeeperResources, MdbClickhouseClusterZookeeperResourcesArgs          
- DiskSize int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- DiskSize int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- diskSize Integer
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
- diskSize number
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId string
- disk_size int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- disk_type_id str
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resource_preset_id str
- diskSize Number
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
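As a C# sketch, the ZooKeeper resources above could be configured as follows; the type names come from this section, but the preset and disk type IDs are placeholder values, so check the official documentation for the identifiers available in your cloud.
using Pulumi;
using Yandex = Pulumi.Yandex;

class ZookeeperExampleStack : Stack
{
    public ZookeeperExampleStack()
    {
        // Minimal sketch of the ZooKeeper subcluster resources documented above;
        // preset and disk type IDs below are assumed placeholders.
        var zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
        {
            Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
            {
                ResourcePresetId = "s2.micro", // compute preset for each ZooKeeper host
                DiskTypeId = "network-ssd",    // storage type of the ZooKeeper hosts
                DiskSize = 10,                 // storage per host, in gigabytes
            },
        };
        // zookeeper would then be assigned to the cluster's Zookeeper input.
    }
}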
Import
A cluster can be imported using the id of the resource, e.g.
 $ pulumi import yandex:index/mdbClickhouseCluster:MdbClickhouseCluster foo cluster_id
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Yandex pulumi/pulumi-yandex
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the yandex Terraform Provider.