Note: this resource belongs to the classic Azure provider. For new deployments we recommend using the Azure Native provider instead.
azure.media.Transform
Explore with Pulumi AI
Manages an Azure Media Services Transform.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
// Resource group that holds every resource in this example.
const resourceGroup = new azure.core.ResourceGroup("example", {
    name: "media-resources",
    location: "West Europe",
});

// GRS-replicated storage account that backs the Media Services account.
const storageAccount = new azure.storage.Account("example", {
    name: "examplestoracc",
    resourceGroupName: resourceGroup.name,
    location: resourceGroup.location,
    accountTier: "Standard",
    accountReplicationType: "GRS",
});

// Media Services account wired to the storage account above as its primary store.
const mediaServicesAccount = new azure.media.ServiceAccount("example", {
    name: "examplemediaacc",
    location: resourceGroup.location,
    resourceGroupName: resourceGroup.name,
    storageAccounts: [{
        id: storageAccount.id,
        isPrimary: true,
    }],
});

// Transform with a single output that uses the built-in AAC audio preset.
const transform = new azure.media.Transform("example", {
    name: "transform1",
    resourceGroupName: resourceGroup.name,
    mediaServicesAccountName: mediaServicesAccount.name,
    description: "My transform description",
    outputs: [{
        relativePriority: "Normal",
        onErrorAction: "ContinueJob",
        builtinPreset: {
            presetName: "AACGoodQualityAudio",
        },
    }],
});
import pulumi
import pulumi_azure as azure
# Resource group that holds every resource in this example.
resource_group = azure.core.ResourceGroup(
    "example",
    name="media-resources",
    location="West Europe",
)

# GRS-replicated storage account that backs the Media Services account.
storage_account = azure.storage.Account(
    "example",
    name="examplestoracc",
    resource_group_name=resource_group.name,
    location=resource_group.location,
    account_tier="Standard",
    account_replication_type="GRS",
)

# Media Services account wired to the storage account above as its primary store.
media_services_account = azure.media.ServiceAccount(
    "example",
    name="examplemediaacc",
    location=resource_group.location,
    resource_group_name=resource_group.name,
    storage_accounts=[
        azure.media.ServiceAccountStorageAccountArgs(
            id=storage_account.id,
            is_primary=True,
        )
    ],
)

# Transform with a single output that uses the built-in AAC audio preset.
transform = azure.media.Transform(
    "example",
    name="transform1",
    resource_group_name=resource_group.name,
    media_services_account_name=media_services_account.name,
    description="My transform description",
    outputs=[
        azure.media.TransformOutputArgs(
            relative_priority="Normal",
            on_error_action="ContinueJob",
            builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                preset_name="AACGoodQualityAudio",
            ),
        )
    ],
)
package main
import (
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/media"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group that holds every resource in this example.
		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
			Name:     pulumi.String("media-resources"),
			Location: pulumi.String("West Europe"),
		})
		if err != nil {
			return err
		}
		// GRS-replicated storage account that backs the Media Services account.
		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
			Name:                   pulumi.String("examplestoracc"),
			ResourceGroupName:      example.Name,
			Location:               example.Location,
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("GRS"),
		})
		if err != nil {
			return err
		}
		// Media Services account wired to the storage account above as its primary store.
		exampleServiceAccount, err := media.NewServiceAccount(ctx, "example", &media.ServiceAccountArgs{
			Name:              pulumi.String("examplemediaacc"),
			Location:          example.Location,
			ResourceGroupName: example.Name,
			StorageAccounts: media.ServiceAccountStorageAccountArray{
				&media.ServiceAccountStorageAccountArgs{
					Id:        exampleAccount.ID(),
					IsPrimary: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		// Transform with a single output that uses the built-in AAC audio preset.
		_, err = media.NewTransform(ctx, "example", &media.TransformArgs{
			Name:                     pulumi.String("transform1"),
			ResourceGroupName:        example.Name,
			MediaServicesAccountName: exampleServiceAccount.Name,
			Description:              pulumi.String("My transform description"),
			Outputs: media.TransformOutputTypeArray{
				&media.TransformOutputTypeArgs{
					RelativePriority: pulumi.String("Normal"),
					OnErrorAction:    pulumi.String("ContinueJob"),
					BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
						PresetName: pulumi.String("AACGoodQualityAudio"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;
return await Deployment.RunAsync(() => 
{
    // Resource group that holds every resource in this example.
    var example = new Azure.Core.ResourceGroup("example", new()
    {
        Name = "media-resources",
        Location = "West Europe",
    });
    // GRS-replicated storage account that backs the Media Services account.
    var exampleAccount = new Azure.Storage.Account("example", new()
    {
        Name = "examplestoracc",
        ResourceGroupName = example.Name,
        Location = example.Location,
        AccountTier = "Standard",
        AccountReplicationType = "GRS",
    });
    // Media Services account wired to the storage account above as its primary store.
    var exampleServiceAccount = new Azure.Media.ServiceAccount("example", new()
    {
        Name = "examplemediaacc",
        Location = example.Location,
        ResourceGroupName = example.Name,
        StorageAccounts = new[]
        {
            new Azure.Media.Inputs.ServiceAccountStorageAccountArgs
            {
                Id = exampleAccount.Id,
                IsPrimary = true,
            },
        },
    });
    // Transform with a single output that uses the built-in AAC audio preset.
    var exampleTransform = new Azure.Media.Transform("example", new()
    {
        Name = "transform1",
        ResourceGroupName = example.Name,
        MediaServicesAccountName = exampleServiceAccount.Name,
        Description = "My transform description",
        Outputs = new[]
        {
            new Azure.Media.Inputs.TransformOutputArgs
            {
                RelativePriority = "Normal",
                OnErrorAction = "ContinueJob",
                BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
                {
                    PresetName = "AACGoodQualityAudio",
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.media.ServiceAccount;
import com.pulumi.azure.media.ServiceAccountArgs;
import com.pulumi.azure.media.inputs.ServiceAccountStorageAccountArgs;
import com.pulumi.azure.media.Transform;
import com.pulumi.azure.media.TransformArgs;
import com.pulumi.azure.media.inputs.TransformOutputArgs;
import com.pulumi.azure.media.inputs.TransformOutputBuiltinPresetArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Resource group that holds every resource in this example.
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("media-resources")
            .location("West Europe")
            .build());
        // GRS-replicated storage account that backs the Media Services account.
        var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
            .name("examplestoracc")
            .resourceGroupName(example.name())
            .location(example.location())
            .accountTier("Standard")
            .accountReplicationType("GRS")
            .build());
        // Media Services account wired to the storage account above as its primary store.
        var exampleServiceAccount = new ServiceAccount("exampleServiceAccount", ServiceAccountArgs.builder()
            .name("examplemediaacc")
            .location(example.location())
            .resourceGroupName(example.name())
            .storageAccounts(ServiceAccountStorageAccountArgs.builder()
                .id(exampleAccount.id())
                .isPrimary(true)
                .build())
            .build());
        // Transform with a single output that uses the built-in AAC audio preset.
        var exampleTransform = new Transform("exampleTransform", TransformArgs.builder()
            .name("transform1")
            .resourceGroupName(example.name())
            .mediaServicesAccountName(exampleServiceAccount.name())
            .description("My transform description")
            .outputs(TransformOutputArgs.builder()
                .relativePriority("Normal")
                .onErrorAction("ContinueJob")
                .builtinPreset(TransformOutputBuiltinPresetArgs.builder()
                    .presetName("AACGoodQualityAudio")
                    .build())
                .build())
            .build());
    }
}
resources:
  # Resource group that holds every resource in this example.
  example:
    type: azure:core:ResourceGroup
    properties:
      name: media-resources
      location: West Europe
  # GRS-replicated storage account that backs the Media Services account.
  exampleAccount:
    type: azure:storage:Account
    name: example
    properties:
      name: examplestoracc
      resourceGroupName: ${example.name}
      location: ${example.location}
      accountTier: Standard
      accountReplicationType: GRS
  # Media Services account wired to the storage account above as its primary store.
  exampleServiceAccount:
    type: azure:media:ServiceAccount
    name: example
    properties:
      name: examplemediaacc
      location: ${example.location}
      resourceGroupName: ${example.name}
      storageAccounts:
        - id: ${exampleAccount.id}
          isPrimary: true
  # Transform with a single output that uses the built-in AAC audio preset.
  exampleTransform:
    type: azure:media:Transform
    name: example
    properties:
      name: transform1
      resourceGroupName: ${example.name}
      mediaServicesAccountName: ${exampleServiceAccount.name}
      description: My transform description
      outputs:
        - relativePriority: Normal
          onErrorAction: ContinueJob
          builtinPreset:
            presetName: AACGoodQualityAudio
With Multiple Outputs
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
// Resource group that holds every resource in this example.
const example = new azure.core.ResourceGroup("example", {
    name: "media-resources",
    location: "West Europe",
});
// GRS-replicated storage account that backs the Media Services account.
const exampleAccount = new azure.storage.Account("example", {
    name: "examplestoracc",
    resourceGroupName: example.name,
    location: example.location,
    accountTier: "Standard",
    accountReplicationType: "GRS",
});
// Media Services account wired to the storage account above as its primary store.
const exampleServiceAccount = new azure.media.ServiceAccount("example", {
    name: "examplemediaacc",
    location: example.location,
    resourceGroupName: example.name,
    storageAccounts: [{
        id: exampleAccount.id,
        isPrimary: true,
    }],
});
// Transform with five outputs demonstrating every preset flavor: built-in,
// audio analyzer, face detector, video analyzer, and a fully custom preset.
const exampleTransform = new azure.media.Transform("example", {
    name: "transform1",
    resourceGroupName: example.name,
    mediaServicesAccountName: exampleServiceAccount.name,
    description: "My transform description",
    outputs: [
        {
            relativePriority: "Normal",
            onErrorAction: "ContinueJob",
            // Built-in adaptive-streaming preset, tuned via presetConfiguration.
            builtinPreset: {
                presetName: "AACGoodQualityAudio",
                presetConfiguration: {
                    complexity: "Balanced",
                    interleaveOutput: "NonInterleavedOutput",
                    keyFrameIntervalInSeconds: 123122.5,
                    maxBitrateBps: 300000,
                    maxHeight: 480,
                    maxLayers: 14,
                    minBitrateBps: 200000,
                    minHeight: 360,
                },
            },
        },
        {
            relativePriority: "Low",
            onErrorAction: "ContinueJob",
            audioAnalyzerPreset: {
                audioLanguage: "en-US",
                audioAnalysisMode: "Basic",
                experimentalOptions: {
                    env: "test",
                },
            },
        },
        {
            relativePriority: "Low",
            onErrorAction: "StopProcessingJob",
            faceDetectorPreset: {
                analysisResolution: "StandardDefinition",
                blurType: "Med",
                faceRedactorMode: "Combined",
                experimentalOptions: {
                    env: "test",
                },
            },
        },
        {
            relativePriority: "Normal",
            onErrorAction: "StopProcessingJob",
            videoAnalyzerPreset: {
                audioLanguage: "en-US",
                audioAnalysisMode: "Basic",
                insightsType: "AllInsights",
                experimentalOptions: {
                    env: "test",
                },
            },
        },
        {
            relativePriority: "Low",
            onErrorAction: "ContinueJob",
            customPreset: {
                codecs: [
                    {
                        aacAudio: {
                            bitrate: 128000,
                            channels: 2,
                            samplingRate: 48000,
                            profile: "AacLc",
                        },
                    },
                    {
                        copyAudio: {
                            label: "test",
                        },
                    },
                    {
                        copyVideo: {
                            label: "test",
                        },
                    },
                    {
                        h264Video: {
                            keyFrameInterval: "PT1S",
                            stretchMode: "AutoSize",
                            syncMode: "Auto",
                            sceneChangeDetectionEnabled: false,
                            rateControlMode: "ABR",
                            complexity: "Quality",
                            layers: [
                                {
                                    width: "64",
                                    height: "64",
                                    bitrate: 1045000,
                                    maxBitrate: 1045000,
                                    bFrames: 3,
                                    slices: 0,
                                    adaptiveBFrameEnabled: true,
                                    profile: "Auto",
                                    level: "auto",
                                    bufferWindow: "PT5S",
                                    referenceFrames: 4,
                                    crf: 23,
                                    entropyMode: "Cabac",
                                },
                                {
                                    width: "64",
                                    height: "64",
                                    bitrate: 1000,
                                    maxBitrate: 1000,
                                    bFrames: 3,
                                    frameRate: "32",
                                    slices: 1,
                                    adaptiveBFrameEnabled: true,
                                    profile: "High444",
                                    level: "auto",
                                    bufferWindow: "PT5S",
                                    referenceFrames: 4,
                                    crf: 23,
                                    entropyMode: "Cavlc",
                                },
                            ],
                        },
                    },
                    {
                        h265Video: {
                            keyFrameInterval: "PT2S",
                            stretchMode: "AutoSize",
                            syncMode: "Auto",
                            sceneChangeDetectionEnabled: false,
                            complexity: "Speed",
                            layers: [{
                                width: "64",
                                height: "64",
                                bitrate: 1045000,
                                maxBitrate: 1045000,
                                bFrames: 3,
                                slices: 5,
                                adaptiveBFrameEnabled: true,
                                profile: "Auto",
                                label: "test",
                                level: "auto",
                                bufferWindow: "PT5S",
                                frameRate: "32",
                                referenceFrames: 4,
                                crf: 23,
                            }],
                        },
                    },
                    {
                        jpgImage: {
                            stretchMode: "AutoSize",
                            syncMode: "Auto",
                            start: "10",
                            // Was "100%%" — a docs-escaping artifact; the service
                            // expects a single-"%" percentage string.
                            range: "100%",
                            spriteColumn: 1,
                            step: "10",
                            layers: [{
                                quality: 70,
                                height: "180",
                                label: "test",
                                width: "120",
                            }],
                        },
                    },
                    {
                        pngImage: {
                            stretchMode: "AutoSize",
                            syncMode: "Auto",
                            start: "{Best}",
                            range: "80",
                            step: "10",
                            layers: [{
                                height: "180",
                                label: "test",
                                width: "120",
                            }],
                        },
                    },
                ],
                formats: [
                    {
                        jpg: {
                            filenamePattern: "test{Basename}",
                        },
                    },
                    {
                        mp4: {
                            filenamePattern: "test{Bitrate}",
                            outputFiles: [{
                                labels: [
                                    "test",
                                    "ppe",
                                ],
                            }],
                        },
                    },
                    {
                        png: {
                            filenamePattern: "test{Basename}",
                        },
                    },
                    {
                        transportStream: {
                            filenamePattern: "test{Bitrate}",
                            outputFiles: [{
                                labels: ["prod"],
                            }],
                        },
                    },
                ],
                filter: {
                    cropRectangle: {
                        height: "240",
                        left: "30",
                        top: "360",
                        width: "70",
                    },
                    deinterlace: {
                        parity: "TopFieldFirst",
                        mode: "AutoPixelAdaptive",
                    },
                    fadeIn: {
                        duration: "PT5S",
                        fadeColor: "0xFF0000",
                        start: "10",
                    },
                    fadeOut: {
                        // "90%%"/"10%%" were docs-escaping artifacts; single "%".
                        duration: "90%",
                        fadeColor: "#FF0C7B",
                        start: "10%",
                    },
                    rotation: "Auto",
                    overlays: [
                        {
                            audio: {
                                inputLabel: "label.jpg",
                                start: "PT5S",
                                end: "PT30S",
                                fadeInDuration: "PT1S",
                                fadeOutDuration: "PT2S",
                                audioGainLevel: 1,
                            },
                        },
                        {
                            video: {
                                inputLabel: "label.jpg",
                                start: "PT5S",
                                end: "PT30S",
                                fadeInDuration: "PT1S",
                                fadeOutDuration: "PT2S",
                                audioGainLevel: 1,
                                opacity: 1,
                                position: {
                                    height: "180",
                                    left: "20",
                                    top: "240",
                                    width: "140",
                                },
                                cropRectangle: {
                                    height: "240",
                                    left: "30",
                                    top: "360",
                                    width: "70",
                                },
                            },
                        },
                    ],
                },
            },
        },
    ],
});
import pulumi
import pulumi_azure as azure
# Resource group that holds every resource in this example.
example = azure.core.ResourceGroup("example",
    name="media-resources",
    location="West Europe")
# GRS-replicated storage account that backs the Media Services account.
example_account = azure.storage.Account("example",
    name="examplestoracc",
    resource_group_name=example.name,
    location=example.location,
    account_tier="Standard",
    account_replication_type="GRS")
# Media Services account wired to the storage account above as its primary store.
example_service_account = azure.media.ServiceAccount("example",
    name="examplemediaacc",
    location=example.location,
    resource_group_name=example.name,
    storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
        id=example_account.id,
        is_primary=True,
    )])
# Transform with five outputs demonstrating every preset flavor: built-in,
# audio analyzer, face detector, video analyzer, and a fully custom preset.
example_transform = azure.media.Transform("example",
    name="transform1",
    resource_group_name=example.name,
    media_services_account_name=example_service_account.name,
    description="My transform description",
    outputs=[
        azure.media.TransformOutputArgs(
            relative_priority="Normal",
            on_error_action="ContinueJob",
            builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                preset_name="AACGoodQualityAudio",
                preset_configuration=azure.media.TransformOutputBuiltinPresetPresetConfigurationArgs(
                    complexity="Balanced",
                    interleave_output="NonInterleavedOutput",
                    key_frame_interval_in_seconds=123122.5,
                    max_bitrate_bps=300000,
                    max_height=480,
                    max_layers=14,
                    min_bitrate_bps=200000,
                    min_height=360,
                ),
            ),
        ),
        azure.media.TransformOutputArgs(
            relative_priority="Low",
            on_error_action="ContinueJob",
            audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
                audio_language="en-US",
                audio_analysis_mode="Basic",
                experimental_options={
                    "env": "test",
                },
            ),
        ),
        azure.media.TransformOutputArgs(
            relative_priority="Low",
            on_error_action="StopProcessingJob",
            face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs(
                analysis_resolution="StandardDefinition",
                blur_type="Med",
                face_redactor_mode="Combined",
                experimental_options={
                    "env": "test",
                },
            ),
        ),
        azure.media.TransformOutputArgs(
            relative_priority="Normal",
            on_error_action="StopProcessingJob",
            video_analyzer_preset=azure.media.TransformOutputVideoAnalyzerPresetArgs(
                audio_language="en-US",
                audio_analysis_mode="Basic",
                insights_type="AllInsights",
                experimental_options={
                    "env": "test",
                },
            ),
        ),
        azure.media.TransformOutputArgs(
            relative_priority="Low",
            on_error_action="ContinueJob",
            custom_preset=azure.media.TransformOutputCustomPresetArgs(
                codecs=[
                    azure.media.TransformOutputCustomPresetCodecArgs(
                        aac_audio=azure.media.TransformOutputCustomPresetCodecAacAudioArgs(
                            bitrate=128000,
                            channels=2,
                            sampling_rate=48000,
                            profile="AacLc",
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetCodecArgs(
                        copy_audio=azure.media.TransformOutputCustomPresetCodecCopyAudioArgs(
                            label="test",
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetCodecArgs(
                        copy_video=azure.media.TransformOutputCustomPresetCodecCopyVideoArgs(
                            label="test",
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetCodecArgs(
                        h264_video=azure.media.TransformOutputCustomPresetCodecH264VideoArgs(
                            key_frame_interval="PT1S",
                            stretch_mode="AutoSize",
                            sync_mode="Auto",
                            scene_change_detection_enabled=False,
                            rate_control_mode="ABR",
                            complexity="Quality",
                            layers=[
                                azure.media.TransformOutputCustomPresetCodecH264VideoLayerArgs(
                                    width="64",
                                    height="64",
                                    bitrate=1045000,
                                    max_bitrate=1045000,
                                    b_frames=3,
                                    slices=0,
                                    adaptive_b_frame_enabled=True,
                                    profile="Auto",
                                    level="auto",
                                    buffer_window="PT5S",
                                    reference_frames=4,
                                    crf=23,
                                    entropy_mode="Cabac",
                                ),
                                azure.media.TransformOutputCustomPresetCodecH264VideoLayerArgs(
                                    width="64",
                                    height="64",
                                    bitrate=1000,
                                    max_bitrate=1000,
                                    b_frames=3,
                                    frame_rate="32",
                                    slices=1,
                                    adaptive_b_frame_enabled=True,
                                    profile="High444",
                                    level="auto",
                                    buffer_window="PT5S",
                                    reference_frames=4,
                                    crf=23,
                                    entropy_mode="Cavlc",
                                ),
                            ],
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetCodecArgs(
                        h265_video=azure.media.TransformOutputCustomPresetCodecH265VideoArgs(
                            key_frame_interval="PT2S",
                            stretch_mode="AutoSize",
                            sync_mode="Auto",
                            scene_change_detection_enabled=False,
                            complexity="Speed",
                            layers=[azure.media.TransformOutputCustomPresetCodecH265VideoLayerArgs(
                                width="64",
                                height="64",
                                bitrate=1045000,
                                max_bitrate=1045000,
                                b_frames=3,
                                slices=5,
                                adaptive_b_frame_enabled=True,
                                profile="Auto",
                                label="test",
                                level="auto",
                                buffer_window="PT5S",
                                frame_rate="32",
                                reference_frames=4,
                                crf=23,
                            )],
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetCodecArgs(
                        jpg_image=azure.media.TransformOutputCustomPresetCodecJpgImageArgs(
                            stretch_mode="AutoSize",
                            sync_mode="Auto",
                            start="10",
                            # Was "100%%" — a docs-escaping artifact; the service
                            # expects a single-"%" percentage string.
                            range="100%",
                            sprite_column=1,
                            step="10",
                            layers=[azure.media.TransformOutputCustomPresetCodecJpgImageLayerArgs(
                                quality=70,
                                height="180",
                                label="test",
                                width="120",
                            )],
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetCodecArgs(
                        png_image=azure.media.TransformOutputCustomPresetCodecPngImageArgs(
                            stretch_mode="AutoSize",
                            sync_mode="Auto",
                            start="{Best}",
                            range="80",
                            step="10",
                            layers=[azure.media.TransformOutputCustomPresetCodecPngImageLayerArgs(
                                height="180",
                                label="test",
                                width="120",
                            )],
                        ),
                    ),
                ],
                formats=[
                    azure.media.TransformOutputCustomPresetFormatArgs(
                        jpg=azure.media.TransformOutputCustomPresetFormatJpgArgs(
                            filename_pattern="test{Basename}",
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetFormatArgs(
                        mp4=azure.media.TransformOutputCustomPresetFormatMp4Args(
                            filename_pattern="test{Bitrate}",
                            output_files=[azure.media.TransformOutputCustomPresetFormatMp4OutputFileArgs(
                                labels=[
                                    "test",
                                    "ppe",
                                ],
                            )],
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetFormatArgs(
                        png=azure.media.TransformOutputCustomPresetFormatPngArgs(
                            filename_pattern="test{Basename}",
                        ),
                    ),
                    azure.media.TransformOutputCustomPresetFormatArgs(
                        transport_stream=azure.media.TransformOutputCustomPresetFormatTransportStreamArgs(
                            filename_pattern="test{Bitrate}",
                            output_files=[azure.media.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs(
                                labels=["prod"],
                            )],
                        ),
                    ),
                ],
                filter=azure.media.TransformOutputCustomPresetFilterArgs(
                    crop_rectangle=azure.media.TransformOutputCustomPresetFilterCropRectangleArgs(
                        height="240",
                        left="30",
                        top="360",
                        width="70",
                    ),
                    deinterlace=azure.media.TransformOutputCustomPresetFilterDeinterlaceArgs(
                        parity="TopFieldFirst",
                        mode="AutoPixelAdaptive",
                    ),
                    fade_in=azure.media.TransformOutputCustomPresetFilterFadeInArgs(
                        duration="PT5S",
                        fade_color="0xFF0000",
                        start="10",
                    ),
                    fade_out=azure.media.TransformOutputCustomPresetFilterFadeOutArgs(
                        # "90%%"/"10%%" were docs-escaping artifacts; single "%".
                        duration="90%",
                        fade_color="#FF0C7B",
                        start="10%",
                    ),
                    rotation="Auto",
                    overlays=[
                        azure.media.TransformOutputCustomPresetFilterOverlayArgs(
                            audio=azure.media.TransformOutputCustomPresetFilterOverlayAudioArgs(
                                input_label="label.jpg",
                                start="PT5S",
                                end="PT30S",
                                fade_in_duration="PT1S",
                                fade_out_duration="PT2S",
                                audio_gain_level=1,
                            ),
                        ),
                        azure.media.TransformOutputCustomPresetFilterOverlayArgs(
                            video=azure.media.TransformOutputCustomPresetFilterOverlayVideoArgs(
                                input_label="label.jpg",
                                start="PT5S",
                                end="PT30S",
                                fade_in_duration="PT1S",
                                fade_out_duration="PT2S",
                                audio_gain_level=1,
                                opacity=1,
                                position=azure.media.TransformOutputCustomPresetFilterOverlayVideoPositionArgs(
                                    height="180",
                                    left="20",
                                    top="240",
                                    width="140",
                                ),
                                crop_rectangle=azure.media.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs(
                                    height="240",
                                    left="30",
                                    top="360",
                                    width="70",
                                ),
                            ),
                        ),
                    ],
                ),
            ),
        ),
    ])
package main
import (
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/media"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main provisions a resource group, a storage account, a Media Services
// account backed by that storage account, and a Transform that demonstrates
// every supported output preset type: a built-in preset, the three analyzer
// presets (audio, face-detector, video), and a fully custom preset with
// codecs, output formats, and filters.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group holding all example resources.
		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
			Name:     pulumi.String("media-resources"),
			Location: pulumi.String("West Europe"),
		})
		if err != nil {
			return err
		}
		// Storage account used as the primary store for the media account.
		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
			Name:                   pulumi.String("examplestoracc"),
			ResourceGroupName:      example.Name,
			Location:               example.Location,
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("GRS"),
		})
		if err != nil {
			return err
		}
		// Media Services account the Transform will be created under.
		exampleServiceAccount, err := media.NewServiceAccount(ctx, "example", &media.ServiceAccountArgs{
			Name:              pulumi.String("examplemediaacc"),
			Location:          example.Location,
			ResourceGroupName: example.Name,
			StorageAccounts: media.ServiceAccountStorageAccountArray{
				&media.ServiceAccountStorageAccountArgs{
					Id:        exampleAccount.ID(),
					IsPrimary: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = media.NewTransform(ctx, "example", &media.TransformArgs{
			Name:                     pulumi.String("transform1"),
			ResourceGroupName:        example.Name,
			MediaServicesAccountName: exampleServiceAccount.Name,
			Description:              pulumi.String("My transform description"),
			Outputs: media.TransformOutputTypeArray{
				// Output 1: built-in preset with an explicit configuration.
				&media.TransformOutputTypeArgs{
					RelativePriority: pulumi.String("Normal"),
					OnErrorAction:    pulumi.String("ContinueJob"),
					BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
						PresetName: pulumi.String("AACGoodQualityAudio"),
						PresetConfiguration: &media.TransformOutputBuiltinPresetPresetConfigurationArgs{
							Complexity:                pulumi.String("Balanced"),
							InterleaveOutput:          pulumi.String("NonInterleavedOutput"),
							KeyFrameIntervalInSeconds: pulumi.Float64(123122.5),
							MaxBitrateBps:             pulumi.Int(300000),
							MaxHeight:                 pulumi.Int(480),
							MaxLayers:                 pulumi.Int(14),
							MinBitrateBps:             pulumi.Int(200000),
							MinHeight:                 pulumi.Int(360),
						},
					},
				},
				// Output 2: audio analyzer preset.
				&media.TransformOutputTypeArgs{
					RelativePriority: pulumi.String("Low"),
					OnErrorAction:    pulumi.String("ContinueJob"),
					AudioAnalyzerPreset: &media.TransformOutputAudioAnalyzerPresetArgs{
						AudioLanguage:     pulumi.String("en-US"),
						AudioAnalysisMode: pulumi.String("Basic"),
						ExperimentalOptions: pulumi.StringMap{
							"env": pulumi.String("test"),
						},
					},
				},
				// Output 3: face detector (redaction) preset.
				&media.TransformOutputTypeArgs{
					RelativePriority: pulumi.String("Low"),
					OnErrorAction:    pulumi.String("StopProcessingJob"),
					FaceDetectorPreset: &media.TransformOutputFaceDetectorPresetArgs{
						AnalysisResolution: pulumi.String("StandardDefinition"),
						BlurType:           pulumi.String("Med"),
						FaceRedactorMode:   pulumi.String("Combined"),
						ExperimentalOptions: pulumi.StringMap{
							"env": pulumi.String("test"),
						},
					},
				},
				// Output 4: video analyzer preset.
				&media.TransformOutputTypeArgs{
					RelativePriority: pulumi.String("Normal"),
					OnErrorAction:    pulumi.String("StopProcessingJob"),
					VideoAnalyzerPreset: &media.TransformOutputVideoAnalyzerPresetArgs{
						AudioLanguage:     pulumi.String("en-US"),
						AudioAnalysisMode: pulumi.String("Basic"),
						InsightsType:      pulumi.String("AllInsights"),
						ExperimentalOptions: pulumi.StringMap{
							"env": pulumi.String("test"),
						},
					},
				},
				// Output 5: fully custom preset (codecs + formats + filter).
				&media.TransformOutputTypeArgs{
					RelativePriority: pulumi.String("Low"),
					OnErrorAction:    pulumi.String("ContinueJob"),
					CustomPreset: &media.TransformOutputCustomPresetArgs{
						Codecs: media.TransformOutputCustomPresetCodecArray{
							&media.TransformOutputCustomPresetCodecArgs{
								AacAudio: &media.TransformOutputCustomPresetCodecAacAudioArgs{
									Bitrate:      pulumi.Int(128000),
									Channels:     pulumi.Int(2),
									SamplingRate: pulumi.Int(48000),
									Profile:      pulumi.String("AacLc"),
								},
							},
							&media.TransformOutputCustomPresetCodecArgs{
								CopyAudio: &media.TransformOutputCustomPresetCodecCopyAudioArgs{
									Label: pulumi.String("test"),
								},
							},
							&media.TransformOutputCustomPresetCodecArgs{
								CopyVideo: &media.TransformOutputCustomPresetCodecCopyVideoArgs{
									Label: pulumi.String("test"),
								},
							},
							&media.TransformOutputCustomPresetCodecArgs{
								H264Video: &media.TransformOutputCustomPresetCodecH264VideoArgs{
									KeyFrameInterval:            pulumi.String("PT1S"),
									StretchMode:                 pulumi.String("AutoSize"),
									SyncMode:                    pulumi.String("Auto"),
									SceneChangeDetectionEnabled: pulumi.Bool(false),
									RateControlMode:             pulumi.String("ABR"),
									Complexity:                  pulumi.String("Quality"),
									Layers: media.TransformOutputCustomPresetCodecH264VideoLayerArray{
										&media.TransformOutputCustomPresetCodecH264VideoLayerArgs{
											Width:                 pulumi.String("64"),
											Height:                pulumi.String("64"),
											Bitrate:               pulumi.Int(1045000),
											MaxBitrate:            pulumi.Int(1045000),
											BFrames:               pulumi.Int(3),
											Slices:                pulumi.Int(0),
											AdaptiveBFrameEnabled: pulumi.Bool(true),
											Profile:               pulumi.String("Auto"),
											Level:                 pulumi.String("auto"),
											BufferWindow:          pulumi.String("PT5S"),
											ReferenceFrames:       pulumi.Int(4),
											Crf:                   pulumi.Float64(23),
											EntropyMode:           pulumi.String("Cabac"),
										},
										&media.TransformOutputCustomPresetCodecH264VideoLayerArgs{
											Width:                 pulumi.String("64"),
											Height:                pulumi.String("64"),
											Bitrate:               pulumi.Int(1000),
											MaxBitrate:            pulumi.Int(1000),
											BFrames:               pulumi.Int(3),
											FrameRate:             pulumi.String("32"),
											Slices:                pulumi.Int(1),
											AdaptiveBFrameEnabled: pulumi.Bool(true),
											Profile:               pulumi.String("High444"),
											Level:                 pulumi.String("auto"),
											BufferWindow:          pulumi.String("PT5S"),
											ReferenceFrames:       pulumi.Int(4),
											Crf:                   pulumi.Float64(23),
											EntropyMode:           pulumi.String("Cavlc"),
										},
									},
								},
							},
							&media.TransformOutputCustomPresetCodecArgs{
								H265Video: &media.TransformOutputCustomPresetCodecH265VideoArgs{
									KeyFrameInterval:            pulumi.String("PT2S"),
									StretchMode:                 pulumi.String("AutoSize"),
									SyncMode:                    pulumi.String("Auto"),
									SceneChangeDetectionEnabled: pulumi.Bool(false),
									Complexity:                  pulumi.String("Speed"),
									Layers: media.TransformOutputCustomPresetCodecH265VideoLayerArray{
										&media.TransformOutputCustomPresetCodecH265VideoLayerArgs{
											Width:                 pulumi.String("64"),
											Height:                pulumi.String("64"),
											Bitrate:               pulumi.Int(1045000),
											MaxBitrate:            pulumi.Int(1045000),
											BFrames:               pulumi.Int(3),
											Slices:                pulumi.Int(5),
											AdaptiveBFrameEnabled: pulumi.Bool(true),
											Profile:               pulumi.String("Auto"),
											Label:                 pulumi.String("test"),
											Level:                 pulumi.String("auto"),
											BufferWindow:          pulumi.String("PT5S"),
											FrameRate:             pulumi.String("32"),
											ReferenceFrames:       pulumi.Int(4),
											Crf:                   pulumi.Float64(23),
										},
									},
								},
							},
							&media.TransformOutputCustomPresetCodecArgs{
								JpgImage: &media.TransformOutputCustomPresetCodecJpgImageArgs{
									StretchMode:  pulumi.String("AutoSize"),
									SyncMode:     pulumi.String("Auto"),
									Start:        pulumi.String("10"),
									// Fixed: was "100%%" — "%%" is a printf-escaping artifact from the
									// upstream provider docs; the API expects a single "%" in percent values.
									Range:        pulumi.String("100%"),
									SpriteColumn: pulumi.Int(1),
									Step:         pulumi.String("10"),
									Layers: media.TransformOutputCustomPresetCodecJpgImageLayerArray{
										&media.TransformOutputCustomPresetCodecJpgImageLayerArgs{
											Quality: pulumi.Int(70),
											Height:  pulumi.String("180"),
											Label:   pulumi.String("test"),
											Width:   pulumi.String("120"),
										},
									},
								},
							},
							&media.TransformOutputCustomPresetCodecArgs{
								PngImage: &media.TransformOutputCustomPresetCodecPngImageArgs{
									StretchMode: pulumi.String("AutoSize"),
									SyncMode:    pulumi.String("Auto"),
									Start:       pulumi.String("{Best}"),
									Range:       pulumi.String("80"),
									Step:        pulumi.String("10"),
									Layers: media.TransformOutputCustomPresetCodecPngImageLayerArray{
										&media.TransformOutputCustomPresetCodecPngImageLayerArgs{
											Height: pulumi.String("180"),
											Label:  pulumi.String("test"),
											Width:  pulumi.String("120"),
										},
									},
								},
							},
						},
						Formats: media.TransformOutputCustomPresetFormatArray{
							&media.TransformOutputCustomPresetFormatArgs{
								Jpg: &media.TransformOutputCustomPresetFormatJpgArgs{
									FilenamePattern: pulumi.String("test{Basename}"),
								},
							},
							&media.TransformOutputCustomPresetFormatArgs{
								Mp4: &media.TransformOutputCustomPresetFormatMp4Args{
									FilenamePattern: pulumi.String("test{Bitrate}"),
									OutputFiles: media.TransformOutputCustomPresetFormatMp4OutputFileArray{
										&media.TransformOutputCustomPresetFormatMp4OutputFileArgs{
											Labels: pulumi.StringArray{
												pulumi.String("test"),
												pulumi.String("ppe"),
											},
										},
									},
								},
							},
							&media.TransformOutputCustomPresetFormatArgs{
								Png: &media.TransformOutputCustomPresetFormatPngArgs{
									FilenamePattern: pulumi.String("test{Basename}"),
								},
							},
							&media.TransformOutputCustomPresetFormatArgs{
								TransportStream: &media.TransformOutputCustomPresetFormatTransportStreamArgs{
									FilenamePattern: pulumi.String("test{Bitrate}"),
									OutputFiles: media.TransformOutputCustomPresetFormatTransportStreamOutputFileArray{
										&media.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs{
											Labels: pulumi.StringArray{
												pulumi.String("prod"),
											},
										},
									},
								},
							},
						},
						Filter: &media.TransformOutputCustomPresetFilterArgs{
							CropRectangle: &media.TransformOutputCustomPresetFilterCropRectangleArgs{
								Height: pulumi.String("240"),
								Left:   pulumi.String("30"),
								Top:    pulumi.String("360"),
								Width:  pulumi.String("70"),
							},
							Deinterlace: &media.TransformOutputCustomPresetFilterDeinterlaceArgs{
								Parity: pulumi.String("TopFieldFirst"),
								Mode:   pulumi.String("AutoPixelAdaptive"),
							},
							FadeIn: &media.TransformOutputCustomPresetFilterFadeInArgs{
								Duration:  pulumi.String("PT5S"),
								FadeColor: pulumi.String("0xFF0000"),
								Start:     pulumi.String("10"),
							},
							FadeOut: &media.TransformOutputCustomPresetFilterFadeOutArgs{
								// Fixed: was "90%%" / "10%%" — single "%" is the correct percent form.
								Duration:  pulumi.String("90%"),
								FadeColor: pulumi.String("#FF0C7B"),
								Start:     pulumi.String("10%"),
							},
							Rotation: pulumi.String("Auto"),
							Overlays: media.TransformOutputCustomPresetFilterOverlayArray{
								&media.TransformOutputCustomPresetFilterOverlayArgs{
									Audio: &media.TransformOutputCustomPresetFilterOverlayAudioArgs{
										// NOTE(review): ".jpg" label on an audio overlay looks odd —
										// the label only names a job input, but confirm upstream.
										InputLabel:      pulumi.String("label.jpg"),
										Start:           pulumi.String("PT5S"),
										End:             pulumi.String("PT30S"),
										FadeInDuration:  pulumi.String("PT1S"),
										FadeOutDuration: pulumi.String("PT2S"),
										AudioGainLevel:  pulumi.Float64(1),
									},
								},
								&media.TransformOutputCustomPresetFilterOverlayArgs{
									Video: &media.TransformOutputCustomPresetFilterOverlayVideoArgs{
										InputLabel:      pulumi.String("label.jpg"),
										Start:           pulumi.String("PT5S"),
										End:             pulumi.String("PT30S"),
										FadeInDuration:  pulumi.String("PT1S"),
										FadeOutDuration: pulumi.String("PT2S"),
										AudioGainLevel:  pulumi.Float64(1),
										Opacity:         pulumi.Float64(1),
										Position: &media.TransformOutputCustomPresetFilterOverlayVideoPositionArgs{
											Height: pulumi.String("180"),
											Left:   pulumi.String("20"),
											Top:    pulumi.String("240"),
											Width:  pulumi.String("140"),
										},
										CropRectangle: &media.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs{
											Height: pulumi.String("240"),
											Left:   pulumi.String("30"),
											Top:    pulumi.String("360"),
											Width:  pulumi.String("70"),
										},
									},
								},
							},
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;
// Provisions a resource group, a storage account, a Media Services account,
// and a Transform demonstrating every supported output preset type:
// built-in, audio analyzer, face detector, video analyzer, and a fully
// custom preset with codecs, output formats, and filters.
return await Deployment.RunAsync(() => 
{
    // Resource group holding all example resources.
    var example = new Azure.Core.ResourceGroup("example", new()
    {
        Name = "media-resources",
        Location = "West Europe",
    });
    // Storage account used as the primary store for the media account.
    var exampleAccount = new Azure.Storage.Account("example", new()
    {
        Name = "examplestoracc",
        ResourceGroupName = example.Name,
        Location = example.Location,
        AccountTier = "Standard",
        AccountReplicationType = "GRS",
    });
    // Media Services account the Transform will be created under.
    var exampleServiceAccount = new Azure.Media.ServiceAccount("example", new()
    {
        Name = "examplemediaacc",
        Location = example.Location,
        ResourceGroupName = example.Name,
        StorageAccounts = new[]
        {
            new Azure.Media.Inputs.ServiceAccountStorageAccountArgs
            {
                Id = exampleAccount.Id,
                IsPrimary = true,
            },
        },
    });
    var exampleTransform = new Azure.Media.Transform("example", new()
    {
        Name = "transform1",
        ResourceGroupName = example.Name,
        MediaServicesAccountName = exampleServiceAccount.Name,
        Description = "My transform description",
        Outputs = new[]
        {
            // Output 1: built-in preset with an explicit configuration.
            new Azure.Media.Inputs.TransformOutputArgs
            {
                RelativePriority = "Normal",
                OnErrorAction = "ContinueJob",
                BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
                {
                    PresetName = "AACGoodQualityAudio",
                    PresetConfiguration = new Azure.Media.Inputs.TransformOutputBuiltinPresetPresetConfigurationArgs
                    {
                        Complexity = "Balanced",
                        InterleaveOutput = "NonInterleavedOutput",
                        KeyFrameIntervalInSeconds = 123122.5,
                        MaxBitrateBps = 300000,
                        MaxHeight = 480,
                        MaxLayers = 14,
                        MinBitrateBps = 200000,
                        MinHeight = 360,
                    },
                },
            },
            // Output 2: audio analyzer preset.
            new Azure.Media.Inputs.TransformOutputArgs
            {
                RelativePriority = "Low",
                OnErrorAction = "ContinueJob",
                AudioAnalyzerPreset = new Azure.Media.Inputs.TransformOutputAudioAnalyzerPresetArgs
                {
                    AudioLanguage = "en-US",
                    AudioAnalysisMode = "Basic",
                    ExperimentalOptions = 
                    {
                        { "env", "test" },
                    },
                },
            },
            // Output 3: face detector (redaction) preset.
            new Azure.Media.Inputs.TransformOutputArgs
            {
                RelativePriority = "Low",
                OnErrorAction = "StopProcessingJob",
                FaceDetectorPreset = new Azure.Media.Inputs.TransformOutputFaceDetectorPresetArgs
                {
                    AnalysisResolution = "StandardDefinition",
                    BlurType = "Med",
                    FaceRedactorMode = "Combined",
                    ExperimentalOptions = 
                    {
                        { "env", "test" },
                    },
                },
            },
            // Output 4: video analyzer preset.
            new Azure.Media.Inputs.TransformOutputArgs
            {
                RelativePriority = "Normal",
                OnErrorAction = "StopProcessingJob",
                VideoAnalyzerPreset = new Azure.Media.Inputs.TransformOutputVideoAnalyzerPresetArgs
                {
                    AudioLanguage = "en-US",
                    AudioAnalysisMode = "Basic",
                    InsightsType = "AllInsights",
                    ExperimentalOptions = 
                    {
                        { "env", "test" },
                    },
                },
            },
            // Output 5: fully custom preset (codecs + formats + filter).
            new Azure.Media.Inputs.TransformOutputArgs
            {
                RelativePriority = "Low",
                OnErrorAction = "ContinueJob",
                CustomPreset = new Azure.Media.Inputs.TransformOutputCustomPresetArgs
                {
                    Codecs = new[]
                    {
                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                        {
                            AacAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecAacAudioArgs
                            {
                                Bitrate = 128000,
                                Channels = 2,
                                SamplingRate = 48000,
                                Profile = "AacLc",
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                        {
                            CopyAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecCopyAudioArgs
                            {
                                Label = "test",
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                        {
                            CopyVideo = new Azure.Media.Inputs.TransformOutputCustomPresetCodecCopyVideoArgs
                            {
                                Label = "test",
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                        {
                            H264Video = new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoArgs
                            {
                                KeyFrameInterval = "PT1S",
                                StretchMode = "AutoSize",
                                SyncMode = "Auto",
                                SceneChangeDetectionEnabled = false,
                                RateControlMode = "ABR",
                                Complexity = "Quality",
                                Layers = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoLayerArgs
                                    {
                                        Width = "64",
                                        Height = "64",
                                        Bitrate = 1045000,
                                        MaxBitrate = 1045000,
                                        BFrames = 3,
                                        Slices = 0,
                                        AdaptiveBFrameEnabled = true,
                                        Profile = "Auto",
                                        Level = "auto",
                                        BufferWindow = "PT5S",
                                        ReferenceFrames = 4,
                                        Crf = 23,
                                        EntropyMode = "Cabac",
                                    },
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoLayerArgs
                                    {
                                        Width = "64",
                                        Height = "64",
                                        Bitrate = 1000,
                                        MaxBitrate = 1000,
                                        BFrames = 3,
                                        FrameRate = "32",
                                        Slices = 1,
                                        AdaptiveBFrameEnabled = true,
                                        Profile = "High444",
                                        Level = "auto",
                                        BufferWindow = "PT5S",
                                        ReferenceFrames = 4,
                                        Crf = 23,
                                        EntropyMode = "Cavlc",
                                    },
                                },
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                        {
                            H265Video = new Azure.Media.Inputs.TransformOutputCustomPresetCodecH265VideoArgs
                            {
                                KeyFrameInterval = "PT2S",
                                StretchMode = "AutoSize",
                                SyncMode = "Auto",
                                SceneChangeDetectionEnabled = false,
                                Complexity = "Speed",
                                Layers = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecH265VideoLayerArgs
                                    {
                                        Width = "64",
                                        Height = "64",
                                        Bitrate = 1045000,
                                        MaxBitrate = 1045000,
                                        BFrames = 3,
                                        Slices = 5,
                                        AdaptiveBFrameEnabled = true,
                                        Profile = "Auto",
                                        Label = "test",
                                        Level = "auto",
                                        BufferWindow = "PT5S",
                                        FrameRate = "32",
                                        ReferenceFrames = 4,
                                        Crf = 23,
                                    },
                                },
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                        {
                            JpgImage = new Azure.Media.Inputs.TransformOutputCustomPresetCodecJpgImageArgs
                            {
                                StretchMode = "AutoSize",
                                SyncMode = "Auto",
                                Start = "10",
                                // Fixed: was "100%%" — "%%" is a printf-escaping artifact from the
                                // upstream provider docs; the API expects a single "%" in percent values.
                                Range = "100%",
                                SpriteColumn = 1,
                                Step = "10",
                                Layers = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecJpgImageLayerArgs
                                    {
                                        Quality = 70,
                                        Height = "180",
                                        Label = "test",
                                        Width = "120",
                                    },
                                },
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                        {
                            PngImage = new Azure.Media.Inputs.TransformOutputCustomPresetCodecPngImageArgs
                            {
                                StretchMode = "AutoSize",
                                SyncMode = "Auto",
                                Start = "{Best}",
                                Range = "80",
                                Step = "10",
                                Layers = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecPngImageLayerArgs
                                    {
                                        Height = "180",
                                        Label = "test",
                                        Width = "120",
                                    },
                                },
                            },
                        },
                    },
                    Formats = new[]
                    {
                        new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                        {
                            Jpg = new Azure.Media.Inputs.TransformOutputCustomPresetFormatJpgArgs
                            {
                                FilenamePattern = "test{Basename}",
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                        {
                            Mp4 = new Azure.Media.Inputs.TransformOutputCustomPresetFormatMp4Args
                            {
                                FilenamePattern = "test{Bitrate}",
                                OutputFiles = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetFormatMp4OutputFileArgs
                                    {
                                        Labels = new[]
                                        {
                                            "test",
                                            "ppe",
                                        },
                                    },
                                },
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                        {
                            Png = new Azure.Media.Inputs.TransformOutputCustomPresetFormatPngArgs
                            {
                                FilenamePattern = "test{Basename}",
                            },
                        },
                        new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                        {
                            TransportStream = new Azure.Media.Inputs.TransformOutputCustomPresetFormatTransportStreamArgs
                            {
                                FilenamePattern = "test{Bitrate}",
                                OutputFiles = new[]
                                {
                                    new Azure.Media.Inputs.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs
                                    {
                                        Labels = new[]
                                        {
                                            "prod",
                                        },
                                    },
                                },
                            },
                        },
                    },
                    Filter = new Azure.Media.Inputs.TransformOutputCustomPresetFilterArgs
                    {
                        CropRectangle = new Azure.Media.Inputs.TransformOutputCustomPresetFilterCropRectangleArgs
                        {
                            Height = "240",
                            Left = "30",
                            Top = "360",
                            Width = "70",
                        },
                        Deinterlace = new Azure.Media.Inputs.TransformOutputCustomPresetFilterDeinterlaceArgs
                        {
                            Parity = "TopFieldFirst",
                            Mode = "AutoPixelAdaptive",
                        },
                        FadeIn = new Azure.Media.Inputs.TransformOutputCustomPresetFilterFadeInArgs
                        {
                            Duration = "PT5S",
                            FadeColor = "0xFF0000",
                            Start = "10",
                        },
                        FadeOut = new Azure.Media.Inputs.TransformOutputCustomPresetFilterFadeOutArgs
                        {
                            // Fixed: was "90%%" / "10%%" — single "%" is the correct percent form.
                            Duration = "90%",
                            FadeColor = "#FF0C7B",
                            Start = "10%",
                        },
                        Rotation = "Auto",
                        Overlays = new[]
                        {
                            new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayArgs
                            {
                                Audio = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayAudioArgs
                                {
                                    // NOTE(review): ".jpg" label on an audio overlay looks odd —
                                    // the label only names a job input, but confirm upstream.
                                    InputLabel = "label.jpg",
                                    Start = "PT5S",
                                    End = "PT30S",
                                    FadeInDuration = "PT1S",
                                    FadeOutDuration = "PT2S",
                                    AudioGainLevel = 1,
                                },
                            },
                            new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayArgs
                            {
                                Video = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoArgs
                                {
                                    InputLabel = "label.jpg",
                                    Start = "PT5S",
                                    End = "PT30S",
                                    FadeInDuration = "PT1S",
                                    FadeOutDuration = "PT2S",
                                    AudioGainLevel = 1,
                                    Opacity = 1,
                                    Position = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoPositionArgs
                                    {
                                        Height = "180",
                                        Left = "20",
                                        Top = "240",
                                        Width = "140",
                                    },
                                    CropRectangle = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs
                                    {
                                        Height = "240",
                                        Left = "30",
                                        Top = "360",
                                        Width = "70",
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.media.ServiceAccount;
import com.pulumi.azure.media.ServiceAccountArgs;
import com.pulumi.azure.media.Transform;
import com.pulumi.azure.media.TransformArgs;
import com.pulumi.azure.media.inputs.ServiceAccountStorageAccountArgs;
import com.pulumi.azure.media.inputs.TransformOutputArgs;
import com.pulumi.azure.media.inputs.TransformOutputAudioAnalyzerPresetArgs;
import com.pulumi.azure.media.inputs.TransformOutputBuiltinPresetArgs;
import com.pulumi.azure.media.inputs.TransformOutputBuiltinPresetPresetConfigurationArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecAacAudioArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecCopyAudioArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecCopyVideoArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecH264VideoArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecH264VideoLayerArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecH265VideoArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecH265VideoLayerArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecJpgImageArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecJpgImageLayerArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecPngImageArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetCodecPngImageLayerArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterCropRectangleArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterDeinterlaceArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterFadeInArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterFadeOutArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayAudioArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayVideoArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFilterOverlayVideoPositionArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatJpgArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatMp4Args;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatMp4OutputFileArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatPngArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatTransportStreamArgs;
import com.pulumi.azure.media.inputs.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs;
import com.pulumi.azure.media.inputs.TransformOutputFaceDetectorPresetArgs;
import com.pulumi.azure.media.inputs.TransformOutputVideoAnalyzerPresetArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
/**
 * Example program: provisions an Azure Media Services Transform whose five
 * outputs exercise every preset kind (built-in, audio analyzer, face detector,
 * video analyzer, and a fully custom preset with codecs, formats and filters).
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Resource group holding every resource in this example.
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("media-resources")
            .location("West Europe")
            .build());
        // Storage account that backs the Media Services account.
        var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
            .name("examplestoracc")
            .resourceGroupName(example.name())
            .location(example.location())
            .accountTier("Standard")
            .accountReplicationType("GRS")
            .build());
        // Media Services account the Transform is attached to.
        var exampleServiceAccount = new ServiceAccount("exampleServiceAccount", ServiceAccountArgs.builder()
            .name("examplemediaacc")
            .location(example.location())
            .resourceGroupName(example.name())
            .storageAccounts(ServiceAccountStorageAccountArgs.builder()
                .id(exampleAccount.id())
                .isPrimary(true)
                .build())
            .build());
        var exampleTransform = new Transform("exampleTransform", TransformArgs.builder()
            .name("transform1")
            .resourceGroupName(example.name())
            .mediaServicesAccountName(exampleServiceAccount.name())
            .description("My transform description")
            .outputs(
                // Output 1: built-in preset with an explicit configuration.
                TransformOutputArgs.builder()
                    .relativePriority("Normal")
                    .onErrorAction("ContinueJob")
                    .builtinPreset(TransformOutputBuiltinPresetArgs.builder()
                        .presetName("AACGoodQualityAudio")
                        .presetConfiguration(TransformOutputBuiltinPresetPresetConfigurationArgs.builder()
                            .complexity("Balanced")
                            .interleaveOutput("NonInterleavedOutput")
                            .keyFrameIntervalInSeconds(123122.5)
                            .maxBitrateBps(300000)
                            .maxHeight(480)
                            .maxLayers(14)
                            .minBitrateBps(200000)
                            .minHeight(360)
                            .build())
                        .build())
                    .build(),
                // Output 2: audio analyzer preset.
                TransformOutputArgs.builder()
                    .relativePriority("Low")
                    .onErrorAction("ContinueJob")
                    .audioAnalyzerPreset(TransformOutputAudioAnalyzerPresetArgs.builder()
                        .audioLanguage("en-US")
                        .audioAnalysisMode("Basic")
                        .experimentalOptions(Map.of("env", "test"))
                        .build())
                    .build(),
                // Output 3: face detector preset.
                TransformOutputArgs.builder()
                    .relativePriority("Low")
                    .onErrorAction("StopProcessingJob")
                    .faceDetectorPreset(TransformOutputFaceDetectorPresetArgs.builder()
                        .analysisResolution("StandardDefinition")
                        .blurType("Med")
                        .faceRedactorMode("Combined")
                        .experimentalOptions(Map.of("env", "test"))
                        .build())
                    .build(),
                // Output 4: video analyzer preset.
                TransformOutputArgs.builder()
                    .relativePriority("Normal")
                    .onErrorAction("StopProcessingJob")
                    .videoAnalyzerPreset(TransformOutputVideoAnalyzerPresetArgs.builder()
                        .audioLanguage("en-US")
                        .audioAnalysisMode("Basic")
                        .insightsType("AllInsights")
                        .experimentalOptions(Map.of("env", "test"))
                        .build())
                    .build(),
                // Output 5: custom preset combining codecs, output formats and filters.
                TransformOutputArgs.builder()
                    .relativePriority("Low")
                    .onErrorAction("ContinueJob")
                    .customPreset(TransformOutputCustomPresetArgs.builder()
                        .codecs(
                            TransformOutputCustomPresetCodecArgs.builder()
                                .aacAudio(TransformOutputCustomPresetCodecAacAudioArgs.builder()
                                    .bitrate(128000)
                                    .channels(2)
                                    .samplingRate(48000)
                                    .profile("AacLc")
                                    .build())
                                .build(),
                            TransformOutputCustomPresetCodecArgs.builder()
                                .copyAudio(TransformOutputCustomPresetCodecCopyAudioArgs.builder()
                                    .label("test")
                                    .build())
                                .build(),
                            TransformOutputCustomPresetCodecArgs.builder()
                                .copyVideo(TransformOutputCustomPresetCodecCopyVideoArgs.builder()
                                    .label("test")
                                    .build())
                                .build(),
                            TransformOutputCustomPresetCodecArgs.builder()
                                .h264Video(TransformOutputCustomPresetCodecH264VideoArgs.builder()
                                    .keyFrameInterval("PT1S")
                                    .stretchMode("AutoSize")
                                    .syncMode("Auto")
                                    .sceneChangeDetectionEnabled(false)
                                    .rateControlMode("ABR")
                                    .complexity("Quality")
                                    .layers(
                                        TransformOutputCustomPresetCodecH264VideoLayerArgs.builder()
                                            .width("64")
                                            .height("64")
                                            .bitrate(1045000)
                                            .maxBitrate(1045000)
                                            .bFrames(3)
                                            .slices(0)
                                            .adaptiveBFrameEnabled(true)
                                            .profile("Auto")
                                            .level("auto")
                                            .bufferWindow("PT5S")
                                            .referenceFrames(4)
                                            .crf(23)
                                            .entropyMode("Cabac")
                                            .build(),
                                        TransformOutputCustomPresetCodecH264VideoLayerArgs.builder()
                                            .width("64")
                                            .height("64")
                                            .bitrate(1000)
                                            .maxBitrate(1000)
                                            .bFrames(3)
                                            .frameRate("32")
                                            .slices(1)
                                            .adaptiveBFrameEnabled(true)
                                            .profile("High444")
                                            .level("auto")
                                            .bufferWindow("PT5S")
                                            .referenceFrames(4)
                                            .crf(23)
                                            .entropyMode("Cavlc")
                                            .build())
                                    .build())
                                .build(),
                            TransformOutputCustomPresetCodecArgs.builder()
                                .h265Video(TransformOutputCustomPresetCodecH265VideoArgs.builder()
                                    .keyFrameInterval("PT2S")
                                    .stretchMode("AutoSize")
                                    .syncMode("Auto")
                                    .sceneChangeDetectionEnabled(false)
                                    .complexity("Speed")
                                    .layers(TransformOutputCustomPresetCodecH265VideoLayerArgs.builder()
                                        .width("64")
                                        .height("64")
                                        .bitrate(1045000)
                                        .maxBitrate(1045000)
                                        .bFrames(3)
                                        .slices(5)
                                        .adaptiveBFrameEnabled(true)
                                        .profile("Auto")
                                        .label("test")
                                        .level("auto")
                                        .bufferWindow("PT5S")
                                        .frameRate("32")
                                        .referenceFrames(4)
                                        .crf(23)
                                        .build())
                                    .build())
                                .build(),
                            TransformOutputCustomPresetCodecArgs.builder()
                                .jpgImage(TransformOutputCustomPresetCodecJpgImageArgs.builder()
                                    .stretchMode("AutoSize")
                                    .syncMode("Auto")
                                    .start("10")
                                    // Percentage values take a single '%' ("100%"); the doubled
                                    // '%%' was a doc-generation escaping artifact.
                                    .range("100%")
                                    .spriteColumn(1)
                                    .step("10")
                                    .layers(TransformOutputCustomPresetCodecJpgImageLayerArgs.builder()
                                        .quality(70)
                                        .height("180")
                                        .label("test")
                                        .width("120")
                                        .build())
                                    .build())
                                .build(),
                            TransformOutputCustomPresetCodecArgs.builder()
                                .pngImage(TransformOutputCustomPresetCodecPngImageArgs.builder()
                                    .stretchMode("AutoSize")
                                    .syncMode("Auto")
                                    .start("{Best}")
                                    .range("80")
                                    .step("10")
                                    .layers(TransformOutputCustomPresetCodecPngImageLayerArgs.builder()
                                        .height("180")
                                        .label("test")
                                        .width("120")
                                        .build())
                                    .build())
                                .build())
                        .formats(
                            TransformOutputCustomPresetFormatArgs.builder()
                                .jpg(TransformOutputCustomPresetFormatJpgArgs.builder()
                                    .filenamePattern("test{Basename}")
                                    .build())
                                .build(),
                            TransformOutputCustomPresetFormatArgs.builder()
                                .mp4(TransformOutputCustomPresetFormatMp4Args.builder()
                                    .filenamePattern("test{Bitrate}")
                                    .outputFiles(TransformOutputCustomPresetFormatMp4OutputFileArgs.builder()
                                        .labels(
                                            "test",
                                            "ppe")
                                        .build())
                                    .build())
                                .build(),
                            TransformOutputCustomPresetFormatArgs.builder()
                                .png(TransformOutputCustomPresetFormatPngArgs.builder()
                                    .filenamePattern("test{Basename}")
                                    .build())
                                .build(),
                            TransformOutputCustomPresetFormatArgs.builder()
                                .transportStream(TransformOutputCustomPresetFormatTransportStreamArgs.builder()
                                    .filenamePattern("test{Bitrate}")
                                    .outputFiles(TransformOutputCustomPresetFormatTransportStreamOutputFileArgs.builder()
                                        .labels("prod")
                                        .build())
                                    .build())
                                .build())
                        .filter(TransformOutputCustomPresetFilterArgs.builder()
                            .cropRectangle(TransformOutputCustomPresetFilterCropRectangleArgs.builder()
                                .height("240")
                                .left("30")
                                .top("360")
                                .width("70")
                                .build())
                            .deinterlace(TransformOutputCustomPresetFilterDeinterlaceArgs.builder()
                                .parity("TopFieldFirst")
                                .mode("AutoPixelAdaptive")
                                .build())
                            .fadeIn(TransformOutputCustomPresetFilterFadeInArgs.builder()
                                .duration("PT5S")
                                .fadeColor("0xFF0000")
                                .start("10")
                                .build())
                            .fadeOut(TransformOutputCustomPresetFilterFadeOutArgs.builder()
                                // Single-'%' percentage strings (see note on .range above).
                                .duration("90%")
                                .fadeColor("#FF0C7B")
                                .start("10%")
                                .build())
                            .rotation("Auto")
                            .overlays(
                                TransformOutputCustomPresetFilterOverlayArgs.builder()
                                    .audio(TransformOutputCustomPresetFilterOverlayAudioArgs.builder()
                                        .inputLabel("label.jpg")
                                        .start("PT5S")
                                        .end("PT30S")
                                        .fadeInDuration("PT1S")
                                        .fadeOutDuration("PT2S")
                                        .audioGainLevel(1)
                                        .build())
                                    .build(),
                                TransformOutputCustomPresetFilterOverlayArgs.builder()
                                    .video(TransformOutputCustomPresetFilterOverlayVideoArgs.builder()
                                        .inputLabel("label.jpg")
                                        .start("PT5S")
                                        .end("PT30S")
                                        .fadeInDuration("PT1S")
                                        .fadeOutDuration("PT2S")
                                        .audioGainLevel(1)
                                        .opacity(1)
                                        .position(TransformOutputCustomPresetFilterOverlayVideoPositionArgs.builder()
                                            .height("180")
                                            .left("20")
                                            .top("240")
                                            .width("140")
                                            .build())
                                        .cropRectangle(TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs.builder()
                                            .height("240")
                                            .left("30")
                                            .top("360")
                                            .width("70")
                                            .build())
                                        .build())
                                    .build())
                            .build())
                        .build())
                    .build())
            .build());
    }
}
# Example stack: resource group + storage account + Media Services account,
# plus a Transform whose outputs cover built-in, analyzer and custom presets.
resources:
  example:
    type: azure:core:ResourceGroup
    properties:
      name: media-resources
      location: West Europe
  exampleAccount:
    type: azure:storage:Account
    name: example
    properties:
      name: examplestoracc
      resourceGroupName: ${example.name}
      location: ${example.location}
      accountTier: Standard
      accountReplicationType: GRS
  exampleServiceAccount:
    type: azure:media:ServiceAccount
    name: example
    properties:
      name: examplemediaacc
      location: ${example.location}
      resourceGroupName: ${example.name}
      storageAccounts:
        - id: ${exampleAccount.id}
          isPrimary: true
  exampleTransform:
    type: azure:media:Transform
    name: example
    properties:
      name: transform1
      resourceGroupName: ${example.name}
      mediaServicesAccountName: ${exampleServiceAccount.name}
      description: My transform description
      outputs:
        # Built-in preset with an explicit configuration.
        - relativePriority: Normal
          onErrorAction: ContinueJob
          builtinPreset:
            presetName: AACGoodQualityAudio
            presetConfiguration:
              complexity: Balanced
              interleaveOutput: NonInterleavedOutput
              keyFrameIntervalInSeconds: 123122.5
              maxBitrateBps: 300000
              maxHeight: 480
              maxLayers: 14
              minBitrateBps: 200000
              minHeight: 360
        # Audio analyzer preset.
        - relativePriority: Low
          onErrorAction: ContinueJob
          audioAnalyzerPreset:
            audioLanguage: en-US
            audioAnalysisMode: Basic
            experimentalOptions:
              env: test
        # Face detector preset.
        - relativePriority: Low
          onErrorAction: StopProcessingJob
          faceDetectorPreset:
            analysisResolution: StandardDefinition
            blurType: Med
            faceRedactorMode: Combined
            experimentalOptions:
              env: test
        # Video analyzer preset.
        - relativePriority: Normal
          onErrorAction: StopProcessingJob
          videoAnalyzerPreset:
            audioLanguage: en-US
            audioAnalysisMode: Basic
            insightsType: AllInsights
            experimentalOptions:
              env: test
        # Custom preset combining codecs, output formats and filters.
        - relativePriority: Low
          onErrorAction: ContinueJob
          customPreset:
            codecs:
              - aacAudio:
                  bitrate: 128000
                  channels: 2
                  samplingRate: 48000
                  profile: AacLc
              - copyAudio:
                  label: test
              - copyVideo:
                  label: test
              - h264Video:
                  keyFrameInterval: PT1S
                  stretchMode: AutoSize
                  syncMode: Auto
                  sceneChangeDetectionEnabled: false
                  rateControlMode: ABR
                  complexity: Quality
                  layers:
                    - width: '64'
                      height: '64'
                      # Plain integers (bits per second) instead of the
                      # scientific-notation floats the generator emitted.
                      bitrate: 1045000
                      maxBitrate: 1045000
                      bFrames: 3
                      slices: 0
                      adaptiveBFrameEnabled: true
                      profile: Auto
                      level: auto
                      bufferWindow: PT5S
                      referenceFrames: 4
                      crf: 23
                      entropyMode: Cabac
                    - width: '64'
                      height: '64'
                      bitrate: 1000
                      maxBitrate: 1000
                      bFrames: 3
                      frameRate: '32'
                      slices: 1
                      adaptiveBFrameEnabled: true
                      profile: High444
                      level: auto
                      bufferWindow: PT5S
                      referenceFrames: 4
                      crf: 23
                      entropyMode: Cavlc
              - h265Video:
                  keyFrameInterval: PT2S
                  stretchMode: AutoSize
                  syncMode: Auto
                  sceneChangeDetectionEnabled: false
                  complexity: Speed
                  layers:
                    - width: '64'
                      height: '64'
                      bitrate: 1045000
                      maxBitrate: 1045000
                      bFrames: 3
                      slices: 5
                      adaptiveBFrameEnabled: true
                      profile: Auto
                      label: test
                      level: auto
                      bufferWindow: PT5S
                      frameRate: '32'
                      referenceFrames: 4
                      crf: 23
              - jpgImage:
                  stretchMode: AutoSize
                  syncMode: Auto
                  start: '10'
                  # Percentage values take a single '%'; the doubled '%%' was a
                  # doc-generation escaping artifact.
                  range: '100%'
                  spriteColumn: 1
                  step: '10'
                  layers:
                    - quality: 70
                      height: '180'
                      label: test
                      width: '120'
              - pngImage:
                  stretchMode: AutoSize
                  syncMode: Auto
                  start: '{Best}'
                  range: '80'
                  step: '10'
                  layers:
                    - height: '180'
                      label: test
                      width: '120'
            formats:
              - jpg:
                  filenamePattern: test{Basename}
              - mp4:
                  filenamePattern: test{Bitrate}
                  outputFiles:
                    - labels:
                        - test
                        - ppe
              - png:
                  filenamePattern: test{Basename}
              - transportStream:
                  filenamePattern: test{Bitrate}
                  outputFiles:
                    - labels:
                        - prod
            filter:
              cropRectangle:
                height: '240'
                left: '30'
                top: '360'
                width: '70'
              deinterlace:
                parity: TopFieldFirst
                mode: AutoPixelAdaptive
              fadeIn:
                duration: PT5S
                # Quoted so YAML keeps the hex string; unquoted 0xFF0000 is
                # resolved to the integer 16711680.
                fadeColor: '0xFF0000'
                start: '10'
              fadeOut:
                duration: '90%'
                fadeColor: '#FF0C7B'
                start: '10%'
              rotation: Auto
              overlays:
                - audio:
                    inputLabel: label.jpg
                    start: PT5S
                    end: PT30S
                    fadeInDuration: PT1S
                    fadeOutDuration: PT2S
                    audioGainLevel: 1
                - video:
                    inputLabel: label.jpg
                    start: PT5S
                    end: PT30S
                    fadeInDuration: PT1S
                    fadeOutDuration: PT2S
                    audioGainLevel: 1
                    opacity: 1
                    position:
                      height: '180'
                      left: '20'
                      top: '240'
                      width: '140'
                    cropRectangle:
                      height: '240'
                      left: '30'
                      top: '360'
                      width: '70'
Create Transform Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Transform(name: string, args: TransformArgs, opts?: CustomResourceOptions);
@overload
def Transform(resource_name: str,
              args: TransformArgs,
              opts: Optional[ResourceOptions] = None)
@overload
def Transform(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              media_services_account_name: Optional[str] = None,
              resource_group_name: Optional[str] = None,
              description: Optional[str] = None,
              name: Optional[str] = None,
              outputs: Optional[Sequence[TransformOutputArgs]] = None)
func NewTransform(ctx *Context, name string, args TransformArgs, opts ...ResourceOption) (*Transform, error)
public Transform(string name, TransformArgs args, CustomResourceOptions? opts = null)
public Transform(String name, TransformArgs args)
public Transform(String name, TransformArgs args, CustomResourceOptions options)
type: azure:media:Transform
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
 - The unique name of the resource.
 - args TransformArgs
 - The arguments to resource properties.
 - opts CustomResourceOptions
 - Bag of options to control resource's behavior.
 
- resource_name str
 - The unique name of the resource.
 - args TransformArgs
 - The arguments to resource properties.
 - opts ResourceOptions
 - Bag of options to control resource's behavior.
 
- ctx Context
 - Context object for the current deployment.
 - name string
 - The unique name of the resource.
 - args TransformArgs
 - The arguments to resource properties.
 - opts ResourceOption
 - Bag of options to control resource's behavior.
 
- name string
 - The unique name of the resource.
 - args TransformArgs
 - The arguments to resource properties.
 - opts CustomResourceOptions
 - Bag of options to control resource's behavior.
 
- name String
 - The unique name of the resource.
 - args TransformArgs
 - The arguments to resource properties.
 - options CustomResourceOptions
 - Bag of options to control resource's behavior.
 
Constructor example
The following reference example uses placeholder values for all input properties.
var transformResource = new Azure.Media.Transform("transformResource", new()
{
    MediaServicesAccountName = "string",
    ResourceGroupName = "string",
    Description = "string",
    Name = "string",
    Outputs = new[]
    {
        new Azure.Media.Inputs.TransformOutputArgs
        {
            AudioAnalyzerPreset = new Azure.Media.Inputs.TransformOutputAudioAnalyzerPresetArgs
            {
                AudioAnalysisMode = "string",
                AudioLanguage = "string",
                ExperimentalOptions = 
                {
                    { "string", "string" },
                },
            },
            BuiltinPreset = new Azure.Media.Inputs.TransformOutputBuiltinPresetArgs
            {
                PresetName = "string",
                PresetConfiguration = new Azure.Media.Inputs.TransformOutputBuiltinPresetPresetConfigurationArgs
                {
                    Complexity = "string",
                    InterleaveOutput = "string",
                    KeyFrameIntervalInSeconds = 0,
                    MaxBitrateBps = 0,
                    MaxHeight = 0,
                    MaxLayers = 0,
                    MinBitrateBps = 0,
                    MinHeight = 0,
                },
            },
            CustomPreset = new Azure.Media.Inputs.TransformOutputCustomPresetArgs
            {
                Codecs = new[]
                {
                    new Azure.Media.Inputs.TransformOutputCustomPresetCodecArgs
                    {
                        AacAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecAacAudioArgs
                        {
                            Bitrate = 0,
                            Channels = 0,
                            Label = "string",
                            Profile = "string",
                            SamplingRate = 0,
                        },
                        CopyAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecCopyAudioArgs
                        {
                            Label = "string",
                        },
                        CopyVideo = new Azure.Media.Inputs.TransformOutputCustomPresetCodecCopyVideoArgs
                        {
                            Label = "string",
                        },
                        DdAudio = new Azure.Media.Inputs.TransformOutputCustomPresetCodecDdAudioArgs
                        {
                            Bitrate = 0,
                            Channels = 0,
                            Label = "string",
                            SamplingRate = 0,
                        },
                        H264Video = new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoArgs
                        {
                            Complexity = "string",
                            KeyFrameInterval = "string",
                            Label = "string",
                            Layers = new[]
                            {
                                new Azure.Media.Inputs.TransformOutputCustomPresetCodecH264VideoLayerArgs
                                {
                                    Bitrate = 0,
                                    FrameRate = "string",
                                    Label = "string",
                                    BufferWindow = "string",
                                    Crf = 0,
                                    EntropyMode = "string",
                                    AdaptiveBFrameEnabled = false,
                                    Height = "string",
                                    BFrames = 0,
                                    Level = "string",
                                    MaxBitrate = 0,
                                    Profile = "string",
                                    ReferenceFrames = 0,
                                    Slices = 0,
                                    Width = "string",
                                },
                            },
                            RateControlMode = "string",
                            SceneChangeDetectionEnabled = false,
                            StretchMode = "string",
                            SyncMode = "string",
                        },
                        H265Video = new Azure.Media.Inputs.TransformOutputCustomPresetCodecH265VideoArgs
                        {
                            Complexity = "string",
                            KeyFrameInterval = "string",
                            Label = "string",
                            Layers = new[]
                            {
                                new Azure.Media.Inputs.TransformOutputCustomPresetCodecH265VideoLayerArgs
                                {
                                    Bitrate = 0,
                                    Height = "string",
                                    BFrames = 0,
                                    BufferWindow = "string",
                                    Crf = 0,
                                    FrameRate = "string",
                                    AdaptiveBFrameEnabled = false,
                                    Label = "string",
                                    Level = "string",
                                    MaxBitrate = 0,
                                    Profile = "string",
                                    ReferenceFrames = 0,
                                    Slices = 0,
                                    Width = "string",
                                },
                            },
                            SceneChangeDetectionEnabled = false,
                            StretchMode = "string",
                            SyncMode = "string",
                        },
                        JpgImage = new Azure.Media.Inputs.TransformOutputCustomPresetCodecJpgImageArgs
                        {
                            Start = "string",
                            KeyFrameInterval = "string",
                            Label = "string",
                            Layers = new[]
                            {
                                new Azure.Media.Inputs.TransformOutputCustomPresetCodecJpgImageLayerArgs
                                {
                                    Height = "string",
                                    Label = "string",
                                    Quality = 0,
                                    Width = "string",
                                },
                            },
                            Range = "string",
                            SpriteColumn = 0,
                            Step = "string",
                            StretchMode = "string",
                            SyncMode = "string",
                        },
                        PngImage = new Azure.Media.Inputs.TransformOutputCustomPresetCodecPngImageArgs
                        {
                            Start = "string",
                            KeyFrameInterval = "string",
                            Label = "string",
                            Layers = new[]
                            {
                                new Azure.Media.Inputs.TransformOutputCustomPresetCodecPngImageLayerArgs
                                {
                                    Height = "string",
                                    Label = "string",
                                    Width = "string",
                                },
                            },
                            Range = "string",
                            Step = "string",
                            StretchMode = "string",
                            SyncMode = "string",
                        },
                    },
                },
                Formats = new[]
                {
                    new Azure.Media.Inputs.TransformOutputCustomPresetFormatArgs
                    {
                        Jpg = new Azure.Media.Inputs.TransformOutputCustomPresetFormatJpgArgs
                        {
                            FilenamePattern = "string",
                        },
                        Mp4 = new Azure.Media.Inputs.TransformOutputCustomPresetFormatMp4Args
                        {
                            FilenamePattern = "string",
                            OutputFiles = new[]
                            {
                                new Azure.Media.Inputs.TransformOutputCustomPresetFormatMp4OutputFileArgs
                                {
                                    Labels = new[]
                                    {
                                        "string",
                                    },
                                },
                            },
                        },
                        Png = new Azure.Media.Inputs.TransformOutputCustomPresetFormatPngArgs
                        {
                            FilenamePattern = "string",
                        },
                        TransportStream = new Azure.Media.Inputs.TransformOutputCustomPresetFormatTransportStreamArgs
                        {
                            FilenamePattern = "string",
                            OutputFiles = new[]
                            {
                                new Azure.Media.Inputs.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs
                                {
                                    Labels = new[]
                                    {
                                        "string",
                                    },
                                },
                            },
                        },
                    },
                },
                ExperimentalOptions = 
                {
                    { "string", "string" },
                },
                Filter = new Azure.Media.Inputs.TransformOutputCustomPresetFilterArgs
                {
                    CropRectangle = new Azure.Media.Inputs.TransformOutputCustomPresetFilterCropRectangleArgs
                    {
                        Height = "string",
                        Left = "string",
                        Top = "string",
                        Width = "string",
                    },
                    Deinterlace = new Azure.Media.Inputs.TransformOutputCustomPresetFilterDeinterlaceArgs
                    {
                        Mode = "string",
                        Parity = "string",
                    },
                    FadeIn = new Azure.Media.Inputs.TransformOutputCustomPresetFilterFadeInArgs
                    {
                        Duration = "string",
                        FadeColor = "string",
                        Start = "string",
                    },
                    FadeOut = new Azure.Media.Inputs.TransformOutputCustomPresetFilterFadeOutArgs
                    {
                        Duration = "string",
                        FadeColor = "string",
                        Start = "string",
                    },
                    Overlays = new[]
                    {
                        new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayArgs
                        {
                            Audio = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayAudioArgs
                            {
                                InputLabel = "string",
                                AudioGainLevel = 0,
                                End = "string",
                                FadeInDuration = "string",
                                FadeOutDuration = "string",
                                Start = "string",
                            },
                            Video = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoArgs
                            {
                                InputLabel = "string",
                                AudioGainLevel = 0,
                                CropRectangle = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs
                                {
                                    Height = "string",
                                    Left = "string",
                                    Top = "string",
                                    Width = "string",
                                },
                                End = "string",
                                FadeInDuration = "string",
                                FadeOutDuration = "string",
                                Opacity = 0,
                                Position = new Azure.Media.Inputs.TransformOutputCustomPresetFilterOverlayVideoPositionArgs
                                {
                                    Height = "string",
                                    Left = "string",
                                    Top = "string",
                                    Width = "string",
                                },
                                Start = "string",
                            },
                        },
                    },
                    Rotation = "string",
                },
            },
            OnErrorAction = "string",
            RelativePriority = "string",
        },
    },
});
example, err := media.NewTransform(ctx, "transformResource", &media.TransformArgs{
	MediaServicesAccountName: pulumi.String("string"),
	ResourceGroupName:        pulumi.String("string"),
	Description:              pulumi.String("string"),
	Name:                     pulumi.String("string"),
	Outputs: media.TransformOutputTypeArray{
		&media.TransformOutputTypeArgs{
			AudioAnalyzerPreset: &media.TransformOutputAudioAnalyzerPresetArgs{
				AudioAnalysisMode: pulumi.String("string"),
				AudioLanguage:     pulumi.String("string"),
				ExperimentalOptions: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
			},
			BuiltinPreset: &media.TransformOutputBuiltinPresetArgs{
				PresetName: pulumi.String("string"),
				PresetConfiguration: &media.TransformOutputBuiltinPresetPresetConfigurationArgs{
					Complexity:                pulumi.String("string"),
					InterleaveOutput:          pulumi.String("string"),
					KeyFrameIntervalInSeconds: pulumi.Float64(0),
					MaxBitrateBps:             pulumi.Int(0),
					MaxHeight:                 pulumi.Int(0),
					MaxLayers:                 pulumi.Int(0),
					MinBitrateBps:             pulumi.Int(0),
					MinHeight:                 pulumi.Int(0),
				},
			},
			CustomPreset: &media.TransformOutputCustomPresetArgs{
				Codecs: media.TransformOutputCustomPresetCodecArray{
					&media.TransformOutputCustomPresetCodecArgs{
						AacAudio: &media.TransformOutputCustomPresetCodecAacAudioArgs{
							Bitrate:      pulumi.Int(0),
							Channels:     pulumi.Int(0),
							Label:        pulumi.String("string"),
							Profile:      pulumi.String("string"),
							SamplingRate: pulumi.Int(0),
						},
						CopyAudio: &media.TransformOutputCustomPresetCodecCopyAudioArgs{
							Label: pulumi.String("string"),
						},
						CopyVideo: &media.TransformOutputCustomPresetCodecCopyVideoArgs{
							Label: pulumi.String("string"),
						},
						DdAudio: &media.TransformOutputCustomPresetCodecDdAudioArgs{
							Bitrate:      pulumi.Int(0),
							Channels:     pulumi.Int(0),
							Label:        pulumi.String("string"),
							SamplingRate: pulumi.Int(0),
						},
						H264Video: &media.TransformOutputCustomPresetCodecH264VideoArgs{
							Complexity:       pulumi.String("string"),
							KeyFrameInterval: pulumi.String("string"),
							Label:            pulumi.String("string"),
							Layers: media.TransformOutputCustomPresetCodecH264VideoLayerArray{
								&media.TransformOutputCustomPresetCodecH264VideoLayerArgs{
									Bitrate:               pulumi.Int(0),
									FrameRate:             pulumi.String("string"),
									Label:                 pulumi.String("string"),
									BufferWindow:          pulumi.String("string"),
									Crf:                   pulumi.Float64(0),
									EntropyMode:           pulumi.String("string"),
									AdaptiveBFrameEnabled: pulumi.Bool(false),
									Height:                pulumi.String("string"),
									BFrames:               pulumi.Int(0),
									Level:                 pulumi.String("string"),
									MaxBitrate:            pulumi.Int(0),
									Profile:               pulumi.String("string"),
									ReferenceFrames:       pulumi.Int(0),
									Slices:                pulumi.Int(0),
									Width:                 pulumi.String("string"),
								},
							},
							RateControlMode:             pulumi.String("string"),
							SceneChangeDetectionEnabled: pulumi.Bool(false),
							StretchMode:                 pulumi.String("string"),
							SyncMode:                    pulumi.String("string"),
						},
						H265Video: &media.TransformOutputCustomPresetCodecH265VideoArgs{
							Complexity:       pulumi.String("string"),
							KeyFrameInterval: pulumi.String("string"),
							Label:            pulumi.String("string"),
							Layers: media.TransformOutputCustomPresetCodecH265VideoLayerArray{
								&media.TransformOutputCustomPresetCodecH265VideoLayerArgs{
									Bitrate:               pulumi.Int(0),
									Height:                pulumi.String("string"),
									BFrames:               pulumi.Int(0),
									BufferWindow:          pulumi.String("string"),
									Crf:                   pulumi.Float64(0),
									FrameRate:             pulumi.String("string"),
									AdaptiveBFrameEnabled: pulumi.Bool(false),
									Label:                 pulumi.String("string"),
									Level:                 pulumi.String("string"),
									MaxBitrate:            pulumi.Int(0),
									Profile:               pulumi.String("string"),
									ReferenceFrames:       pulumi.Int(0),
									Slices:                pulumi.Int(0),
									Width:                 pulumi.String("string"),
								},
							},
							SceneChangeDetectionEnabled: pulumi.Bool(false),
							StretchMode:                 pulumi.String("string"),
							SyncMode:                    pulumi.String("string"),
						},
						JpgImage: &media.TransformOutputCustomPresetCodecJpgImageArgs{
							Start:            pulumi.String("string"),
							KeyFrameInterval: pulumi.String("string"),
							Label:            pulumi.String("string"),
							Layers: media.TransformOutputCustomPresetCodecJpgImageLayerArray{
								&media.TransformOutputCustomPresetCodecJpgImageLayerArgs{
									Height:  pulumi.String("string"),
									Label:   pulumi.String("string"),
									Quality: pulumi.Int(0),
									Width:   pulumi.String("string"),
								},
							},
							Range:        pulumi.String("string"),
							SpriteColumn: pulumi.Int(0),
							Step:         pulumi.String("string"),
							StretchMode:  pulumi.String("string"),
							SyncMode:     pulumi.String("string"),
						},
						PngImage: &media.TransformOutputCustomPresetCodecPngImageArgs{
							Start:            pulumi.String("string"),
							KeyFrameInterval: pulumi.String("string"),
							Label:            pulumi.String("string"),
							Layers: media.TransformOutputCustomPresetCodecPngImageLayerArray{
								&media.TransformOutputCustomPresetCodecPngImageLayerArgs{
									Height: pulumi.String("string"),
									Label:  pulumi.String("string"),
									Width:  pulumi.String("string"),
								},
							},
							Range:       pulumi.String("string"),
							Step:        pulumi.String("string"),
							StretchMode: pulumi.String("string"),
							SyncMode:    pulumi.String("string"),
						},
					},
				},
				Formats: media.TransformOutputCustomPresetFormatArray{
					&media.TransformOutputCustomPresetFormatArgs{
						Jpg: &media.TransformOutputCustomPresetFormatJpgArgs{
							FilenamePattern: pulumi.String("string"),
						},
						Mp4: &media.TransformOutputCustomPresetFormatMp4Args{
							FilenamePattern: pulumi.String("string"),
							OutputFiles: media.TransformOutputCustomPresetFormatMp4OutputFileArray{
								&media.TransformOutputCustomPresetFormatMp4OutputFileArgs{
									Labels: pulumi.StringArray{
										pulumi.String("string"),
									},
								},
							},
						},
						Png: &media.TransformOutputCustomPresetFormatPngArgs{
							FilenamePattern: pulumi.String("string"),
						},
						TransportStream: &media.TransformOutputCustomPresetFormatTransportStreamArgs{
							FilenamePattern: pulumi.String("string"),
							OutputFiles: media.TransformOutputCustomPresetFormatTransportStreamOutputFileArray{
								&media.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs{
									Labels: pulumi.StringArray{
										pulumi.String("string"),
									},
								},
							},
						},
					},
				},
				ExperimentalOptions: pulumi.StringMap{
					"string": pulumi.String("string"),
				},
				Filter: &media.TransformOutputCustomPresetFilterArgs{
					CropRectangle: &media.TransformOutputCustomPresetFilterCropRectangleArgs{
						Height: pulumi.String("string"),
						Left:   pulumi.String("string"),
						Top:    pulumi.String("string"),
						Width:  pulumi.String("string"),
					},
					Deinterlace: &media.TransformOutputCustomPresetFilterDeinterlaceArgs{
						Mode:   pulumi.String("string"),
						Parity: pulumi.String("string"),
					},
					FadeIn: &media.TransformOutputCustomPresetFilterFadeInArgs{
						Duration:  pulumi.String("string"),
						FadeColor: pulumi.String("string"),
						Start:     pulumi.String("string"),
					},
					FadeOut: &media.TransformOutputCustomPresetFilterFadeOutArgs{
						Duration:  pulumi.String("string"),
						FadeColor: pulumi.String("string"),
						Start:     pulumi.String("string"),
					},
					Overlays: media.TransformOutputCustomPresetFilterOverlayArray{
						&media.TransformOutputCustomPresetFilterOverlayArgs{
							Audio: &media.TransformOutputCustomPresetFilterOverlayAudioArgs{
								InputLabel:      pulumi.String("string"),
								AudioGainLevel:  pulumi.Float64(0),
								End:             pulumi.String("string"),
								FadeInDuration:  pulumi.String("string"),
								FadeOutDuration: pulumi.String("string"),
								Start:           pulumi.String("string"),
							},
							Video: &media.TransformOutputCustomPresetFilterOverlayVideoArgs{
								InputLabel:     pulumi.String("string"),
								AudioGainLevel: pulumi.Float64(0),
								CropRectangle: &media.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs{
									Height: pulumi.String("string"),
									Left:   pulumi.String("string"),
									Top:    pulumi.String("string"),
									Width:  pulumi.String("string"),
								},
								End:             pulumi.String("string"),
								FadeInDuration:  pulumi.String("string"),
								FadeOutDuration: pulumi.String("string"),
								Opacity:         pulumi.Float64(0),
								Position: &media.TransformOutputCustomPresetFilterOverlayVideoPositionArgs{
									Height: pulumi.String("string"),
									Left:   pulumi.String("string"),
									Top:    pulumi.String("string"),
									Width:  pulumi.String("string"),
								},
								Start: pulumi.String("string"),
							},
						},
					},
					Rotation: pulumi.String("string"),
				},
			},
			OnErrorAction:    pulumi.String("string"),
			RelativePriority: pulumi.String("string"),
		},
	},
})
var transformResource = new Transform("transformResource", TransformArgs.builder()
    .mediaServicesAccountName("string")
    .resourceGroupName("string")
    .description("string")
    .name("string")
    .outputs(TransformOutputArgs.builder()
        .audioAnalyzerPreset(TransformOutputAudioAnalyzerPresetArgs.builder()
            .audioAnalysisMode("string")
            .audioLanguage("string")
            .experimentalOptions(Map.of("string", "string"))
            .build())
        .builtinPreset(TransformOutputBuiltinPresetArgs.builder()
            .presetName("string")
            .presetConfiguration(TransformOutputBuiltinPresetPresetConfigurationArgs.builder()
                .complexity("string")
                .interleaveOutput("string")
                .keyFrameIntervalInSeconds(0)
                .maxBitrateBps(0)
                .maxHeight(0)
                .maxLayers(0)
                .minBitrateBps(0)
                .minHeight(0)
                .build())
            .build())
        .customPreset(TransformOutputCustomPresetArgs.builder()
            .codecs(TransformOutputCustomPresetCodecArgs.builder()
                .aacAudio(TransformOutputCustomPresetCodecAacAudioArgs.builder()
                    .bitrate(0)
                    .channels(0)
                    .label("string")
                    .profile("string")
                    .samplingRate(0)
                    .build())
                .copyAudio(TransformOutputCustomPresetCodecCopyAudioArgs.builder()
                    .label("string")
                    .build())
                .copyVideo(TransformOutputCustomPresetCodecCopyVideoArgs.builder()
                    .label("string")
                    .build())
                .ddAudio(TransformOutputCustomPresetCodecDdAudioArgs.builder()
                    .bitrate(0)
                    .channels(0)
                    .label("string")
                    .samplingRate(0)
                    .build())
                .h264Video(TransformOutputCustomPresetCodecH264VideoArgs.builder()
                    .complexity("string")
                    .keyFrameInterval("string")
                    .label("string")
                    .layers(TransformOutputCustomPresetCodecH264VideoLayerArgs.builder()
                        .bitrate(0)
                        .frameRate("string")
                        .label("string")
                        .bufferWindow("string")
                        .crf(0)
                        .entropyMode("string")
                        .adaptiveBFrameEnabled(false)
                        .height("string")
                        .bFrames(0)
                        .level("string")
                        .maxBitrate(0)
                        .profile("string")
                        .referenceFrames(0)
                        .slices(0)
                        .width("string")
                        .build())
                    .rateControlMode("string")
                    .sceneChangeDetectionEnabled(false)
                    .stretchMode("string")
                    .syncMode("string")
                    .build())
                .h265Video(TransformOutputCustomPresetCodecH265VideoArgs.builder()
                    .complexity("string")
                    .keyFrameInterval("string")
                    .label("string")
                    .layers(TransformOutputCustomPresetCodecH265VideoLayerArgs.builder()
                        .bitrate(0)
                        .height("string")
                        .bFrames(0)
                        .bufferWindow("string")
                        .crf(0)
                        .frameRate("string")
                        .adaptiveBFrameEnabled(false)
                        .label("string")
                        .level("string")
                        .maxBitrate(0)
                        .profile("string")
                        .referenceFrames(0)
                        .slices(0)
                        .width("string")
                        .build())
                    .sceneChangeDetectionEnabled(false)
                    .stretchMode("string")
                    .syncMode("string")
                    .build())
                .jpgImage(TransformOutputCustomPresetCodecJpgImageArgs.builder()
                    .start("string")
                    .keyFrameInterval("string")
                    .label("string")
                    .layers(TransformOutputCustomPresetCodecJpgImageLayerArgs.builder()
                        .height("string")
                        .label("string")
                        .quality(0)
                        .width("string")
                        .build())
                    .range("string")
                    .spriteColumn(0)
                    .step("string")
                    .stretchMode("string")
                    .syncMode("string")
                    .build())
                .pngImage(TransformOutputCustomPresetCodecPngImageArgs.builder()
                    .start("string")
                    .keyFrameInterval("string")
                    .label("string")
                    .layers(TransformOutputCustomPresetCodecPngImageLayerArgs.builder()
                        .height("string")
                        .label("string")
                        .width("string")
                        .build())
                    .range("string")
                    .step("string")
                    .stretchMode("string")
                    .syncMode("string")
                    .build())
                .build())
            .formats(TransformOutputCustomPresetFormatArgs.builder()
                .jpg(TransformOutputCustomPresetFormatJpgArgs.builder()
                    .filenamePattern("string")
                    .build())
                .mp4(TransformOutputCustomPresetFormatMp4Args.builder()
                    .filenamePattern("string")
                    .outputFiles(TransformOutputCustomPresetFormatMp4OutputFileArgs.builder()
                        .labels("string")
                        .build())
                    .build())
                .png(TransformOutputCustomPresetFormatPngArgs.builder()
                    .filenamePattern("string")
                    .build())
                .transportStream(TransformOutputCustomPresetFormatTransportStreamArgs.builder()
                    .filenamePattern("string")
                    .outputFiles(TransformOutputCustomPresetFormatTransportStreamOutputFileArgs.builder()
                        .labels("string")
                        .build())
                    .build())
                .build())
            .experimentalOptions(Map.of("string", "string"))
            .filter(TransformOutputCustomPresetFilterArgs.builder()
                .cropRectangle(TransformOutputCustomPresetFilterCropRectangleArgs.builder()
                    .height("string")
                    .left("string")
                    .top("string")
                    .width("string")
                    .build())
                .deinterlace(TransformOutputCustomPresetFilterDeinterlaceArgs.builder()
                    .mode("string")
                    .parity("string")
                    .build())
                .fadeIn(TransformOutputCustomPresetFilterFadeInArgs.builder()
                    .duration("string")
                    .fadeColor("string")
                    .start("string")
                    .build())
                .fadeOut(TransformOutputCustomPresetFilterFadeOutArgs.builder()
                    .duration("string")
                    .fadeColor("string")
                    .start("string")
                    .build())
                .overlays(TransformOutputCustomPresetFilterOverlayArgs.builder()
                    .audio(TransformOutputCustomPresetFilterOverlayAudioArgs.builder()
                        .inputLabel("string")
                        .audioGainLevel(0)
                        .end("string")
                        .fadeInDuration("string")
                        .fadeOutDuration("string")
                        .start("string")
                        .build())
                    .video(TransformOutputCustomPresetFilterOverlayVideoArgs.builder()
                        .inputLabel("string")
                        .audioGainLevel(0)
                        .cropRectangle(TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs.builder()
                            .height("string")
                            .left("string")
                            .top("string")
                            .width("string")
                            .build())
                        .end("string")
                        .fadeInDuration("string")
                        .fadeOutDuration("string")
                        .opacity(0)
                        .position(TransformOutputCustomPresetFilterOverlayVideoPositionArgs.builder()
                            .height("string")
                            .left("string")
                            .top("string")
                            .width("string")
                            .build())
                        .start("string")
                        .build())
                    .build())
                .rotation("string")
                .build())
            .build())
        .onErrorAction("string")
        .relativePriority("string")
        .build())
    .build());
# Full argument reference for azure.media.Transform: every supported property
# is shown set to a placeholder value ("string", 0, False). Generated example —
# substitute real values before use.
transform_resource = azure.media.Transform("transformResource",
    media_services_account_name="string",
    resource_group_name="string",
    description="string",
    name="string",
    # NOTE(review): an output normally configures just one preset
    # (audio_analyzer/builtin/custom); all are shown here for reference only —
    # confirm against the provider documentation.
    outputs=[azure.media.TransformOutputArgs(
        audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
            audio_analysis_mode="string",
            audio_language="string",
            experimental_options={
                "string": "string",
            },
        ),
        builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
            preset_name="string",
            preset_configuration=azure.media.TransformOutputBuiltinPresetPresetConfigurationArgs(
                complexity="string",
                interleave_output="string",
                key_frame_interval_in_seconds=0,
                max_bitrate_bps=0,
                max_height=0,
                max_layers=0,
                min_bitrate_bps=0,
                min_height=0,
            ),
        ),
        # Custom preset: codec list, output formats, and an optional filter chain.
        custom_preset=azure.media.TransformOutputCustomPresetArgs(
            codecs=[azure.media.TransformOutputCustomPresetCodecArgs(
                aac_audio=azure.media.TransformOutputCustomPresetCodecAacAudioArgs(
                    bitrate=0,
                    channels=0,
                    label="string",
                    profile="string",
                    sampling_rate=0,
                ),
                copy_audio=azure.media.TransformOutputCustomPresetCodecCopyAudioArgs(
                    label="string",
                ),
                copy_video=azure.media.TransformOutputCustomPresetCodecCopyVideoArgs(
                    label="string",
                ),
                dd_audio=azure.media.TransformOutputCustomPresetCodecDdAudioArgs(
                    bitrate=0,
                    channels=0,
                    label="string",
                    sampling_rate=0,
                ),
                h264_video=azure.media.TransformOutputCustomPresetCodecH264VideoArgs(
                    complexity="string",
                    key_frame_interval="string",
                    label="string",
                    layers=[azure.media.TransformOutputCustomPresetCodecH264VideoLayerArgs(
                        bitrate=0,
                        frame_rate="string",
                        label="string",
                        buffer_window="string",
                        crf=0,
                        entropy_mode="string",
                        adaptive_b_frame_enabled=False,
                        height="string",
                        b_frames=0,
                        level="string",
                        max_bitrate=0,
                        profile="string",
                        reference_frames=0,
                        slices=0,
                        width="string",
                    )],
                    rate_control_mode="string",
                    scene_change_detection_enabled=False,
                    stretch_mode="string",
                    sync_mode="string",
                ),
                h265_video=azure.media.TransformOutputCustomPresetCodecH265VideoArgs(
                    complexity="string",
                    key_frame_interval="string",
                    label="string",
                    layers=[azure.media.TransformOutputCustomPresetCodecH265VideoLayerArgs(
                        bitrate=0,
                        height="string",
                        b_frames=0,
                        buffer_window="string",
                        crf=0,
                        frame_rate="string",
                        adaptive_b_frame_enabled=False,
                        label="string",
                        level="string",
                        max_bitrate=0,
                        profile="string",
                        reference_frames=0,
                        slices=0,
                        width="string",
                    )],
                    scene_change_detection_enabled=False,
                    stretch_mode="string",
                    sync_mode="string",
                ),
                jpg_image=azure.media.TransformOutputCustomPresetCodecJpgImageArgs(
                    start="string",
                    key_frame_interval="string",
                    label="string",
                    layers=[azure.media.TransformOutputCustomPresetCodecJpgImageLayerArgs(
                        height="string",
                        label="string",
                        quality=0,
                        width="string",
                    )],
                    range="string",
                    sprite_column=0,
                    step="string",
                    stretch_mode="string",
                    sync_mode="string",
                ),
                png_image=azure.media.TransformOutputCustomPresetCodecPngImageArgs(
                    start="string",
                    key_frame_interval="string",
                    label="string",
                    layers=[azure.media.TransformOutputCustomPresetCodecPngImageLayerArgs(
                        height="string",
                        label="string",
                        width="string",
                    )],
                    range="string",
                    step="string",
                    stretch_mode="string",
                    sync_mode="string",
                ),
            )],
            formats=[azure.media.TransformOutputCustomPresetFormatArgs(
                jpg=azure.media.TransformOutputCustomPresetFormatJpgArgs(
                    filename_pattern="string",
                ),
                mp4=azure.media.TransformOutputCustomPresetFormatMp4Args(
                    filename_pattern="string",
                    output_files=[azure.media.TransformOutputCustomPresetFormatMp4OutputFileArgs(
                        labels=["string"],
                    )],
                ),
                png=azure.media.TransformOutputCustomPresetFormatPngArgs(
                    filename_pattern="string",
                ),
                transport_stream=azure.media.TransformOutputCustomPresetFormatTransportStreamArgs(
                    filename_pattern="string",
                    output_files=[azure.media.TransformOutputCustomPresetFormatTransportStreamOutputFileArgs(
                        labels=["string"],
                    )],
                ),
            )],
            experimental_options={
                "string": "string",
            },
            # Optional filter operations: crop, deinterlace, fade in/out,
            # overlays, rotation.
            filter=azure.media.TransformOutputCustomPresetFilterArgs(
                crop_rectangle=azure.media.TransformOutputCustomPresetFilterCropRectangleArgs(
                    height="string",
                    left="string",
                    top="string",
                    width="string",
                ),
                deinterlace=azure.media.TransformOutputCustomPresetFilterDeinterlaceArgs(
                    mode="string",
                    parity="string",
                ),
                fade_in=azure.media.TransformOutputCustomPresetFilterFadeInArgs(
                    duration="string",
                    fade_color="string",
                    start="string",
                ),
                fade_out=azure.media.TransformOutputCustomPresetFilterFadeOutArgs(
                    duration="string",
                    fade_color="string",
                    start="string",
                ),
                overlays=[azure.media.TransformOutputCustomPresetFilterOverlayArgs(
                    audio=azure.media.TransformOutputCustomPresetFilterOverlayAudioArgs(
                        input_label="string",
                        audio_gain_level=0,
                        end="string",
                        fade_in_duration="string",
                        fade_out_duration="string",
                        start="string",
                    ),
                    video=azure.media.TransformOutputCustomPresetFilterOverlayVideoArgs(
                        input_label="string",
                        audio_gain_level=0,
                        crop_rectangle=azure.media.TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs(
                            height="string",
                            left="string",
                            top="string",
                            width="string",
                        ),
                        end="string",
                        fade_in_duration="string",
                        fade_out_duration="string",
                        opacity=0,
                        position=azure.media.TransformOutputCustomPresetFilterOverlayVideoPositionArgs(
                            height="string",
                            left="string",
                            top="string",
                            width="string",
                        ),
                        start="string",
                    ),
                )],
                rotation="string",
            ),
        ),
        on_error_action="string",
        relative_priority="string",
    )])
// Full argument reference for azure.media.Transform: every supported property
// is shown set to a placeholder value ("string", 0, false). Generated example —
// substitute real values before use.
const transformResource = new azure.media.Transform("transformResource", {
    mediaServicesAccountName: "string",
    resourceGroupName: "string",
    description: "string",
    name: "string",
    // NOTE(review): an output normally configures just one preset
    // (audioAnalyzer/builtin/custom); all are shown here for reference only —
    // confirm against the provider documentation.
    outputs: [{
        audioAnalyzerPreset: {
            audioAnalysisMode: "string",
            audioLanguage: "string",
            experimentalOptions: {
                string: "string",
            },
        },
        builtinPreset: {
            presetName: "string",
            presetConfiguration: {
                complexity: "string",
                interleaveOutput: "string",
                keyFrameIntervalInSeconds: 0,
                maxBitrateBps: 0,
                maxHeight: 0,
                maxLayers: 0,
                minBitrateBps: 0,
                minHeight: 0,
            },
        },
        // Custom preset: codec list, output formats, and an optional filter chain.
        customPreset: {
            codecs: [{
                aacAudio: {
                    bitrate: 0,
                    channels: 0,
                    label: "string",
                    profile: "string",
                    samplingRate: 0,
                },
                copyAudio: {
                    label: "string",
                },
                copyVideo: {
                    label: "string",
                },
                ddAudio: {
                    bitrate: 0,
                    channels: 0,
                    label: "string",
                    samplingRate: 0,
                },
                h264Video: {
                    complexity: "string",
                    keyFrameInterval: "string",
                    label: "string",
                    layers: [{
                        bitrate: 0,
                        frameRate: "string",
                        label: "string",
                        bufferWindow: "string",
                        crf: 0,
                        entropyMode: "string",
                        adaptiveBFrameEnabled: false,
                        height: "string",
                        bFrames: 0,
                        level: "string",
                        maxBitrate: 0,
                        profile: "string",
                        referenceFrames: 0,
                        slices: 0,
                        width: "string",
                    }],
                    rateControlMode: "string",
                    sceneChangeDetectionEnabled: false,
                    stretchMode: "string",
                    syncMode: "string",
                },
                h265Video: {
                    complexity: "string",
                    keyFrameInterval: "string",
                    label: "string",
                    layers: [{
                        bitrate: 0,
                        height: "string",
                        bFrames: 0,
                        bufferWindow: "string",
                        crf: 0,
                        frameRate: "string",
                        adaptiveBFrameEnabled: false,
                        label: "string",
                        level: "string",
                        maxBitrate: 0,
                        profile: "string",
                        referenceFrames: 0,
                        slices: 0,
                        width: "string",
                    }],
                    sceneChangeDetectionEnabled: false,
                    stretchMode: "string",
                    syncMode: "string",
                },
                jpgImage: {
                    start: "string",
                    keyFrameInterval: "string",
                    label: "string",
                    layers: [{
                        height: "string",
                        label: "string",
                        quality: 0,
                        width: "string",
                    }],
                    range: "string",
                    spriteColumn: 0,
                    step: "string",
                    stretchMode: "string",
                    syncMode: "string",
                },
                pngImage: {
                    start: "string",
                    keyFrameInterval: "string",
                    label: "string",
                    layers: [{
                        height: "string",
                        label: "string",
                        width: "string",
                    }],
                    range: "string",
                    step: "string",
                    stretchMode: "string",
                    syncMode: "string",
                },
            }],
            formats: [{
                jpg: {
                    filenamePattern: "string",
                },
                mp4: {
                    filenamePattern: "string",
                    outputFiles: [{
                        labels: ["string"],
                    }],
                },
                png: {
                    filenamePattern: "string",
                },
                transportStream: {
                    filenamePattern: "string",
                    outputFiles: [{
                        labels: ["string"],
                    }],
                },
            }],
            experimentalOptions: {
                string: "string",
            },
            // Optional filter operations: crop, deinterlace, fade in/out,
            // overlays, rotation.
            filter: {
                cropRectangle: {
                    height: "string",
                    left: "string",
                    top: "string",
                    width: "string",
                },
                deinterlace: {
                    mode: "string",
                    parity: "string",
                },
                fadeIn: {
                    duration: "string",
                    fadeColor: "string",
                    start: "string",
                },
                fadeOut: {
                    duration: "string",
                    fadeColor: "string",
                    start: "string",
                },
                overlays: [{
                    audio: {
                        inputLabel: "string",
                        audioGainLevel: 0,
                        end: "string",
                        fadeInDuration: "string",
                        fadeOutDuration: "string",
                        start: "string",
                    },
                    video: {
                        inputLabel: "string",
                        audioGainLevel: 0,
                        cropRectangle: {
                            height: "string",
                            left: "string",
                            top: "string",
                            width: "string",
                        },
                        end: "string",
                        fadeInDuration: "string",
                        fadeOutDuration: "string",
                        opacity: 0,
                        position: {
                            height: "string",
                            left: "string",
                            top: "string",
                            width: "string",
                        },
                        start: "string",
                    },
                }],
                rotation: "string",
            },
        },
        onErrorAction: "string",
        relativePriority: "string",
    }],
});
# Pulumi YAML form of the full-argument Transform example; every scalar is a
# placeholder value. Generated reference — substitute real values before use.
type: azure:media:Transform
properties:
    description: string
    mediaServicesAccountName: string
    name: string
    # NOTE(review): an output normally configures just one preset
    # (audioAnalyzer/builtin/custom); all are shown here for reference only.
    outputs:
        - audioAnalyzerPreset:
            audioAnalysisMode: string
            audioLanguage: string
            experimentalOptions:
                string: string
          builtinPreset:
            presetConfiguration:
                complexity: string
                interleaveOutput: string
                keyFrameIntervalInSeconds: 0
                maxBitrateBps: 0
                maxHeight: 0
                maxLayers: 0
                minBitrateBps: 0
                minHeight: 0
            presetName: string
          customPreset:
            codecs:
                - aacAudio:
                    bitrate: 0
                    channels: 0
                    label: string
                    profile: string
                    samplingRate: 0
                  copyAudio:
                    label: string
                  copyVideo:
                    label: string
                  ddAudio:
                    bitrate: 0
                    channels: 0
                    label: string
                    samplingRate: 0
                  h264Video:
                    complexity: string
                    keyFrameInterval: string
                    label: string
                    layers:
                        - adaptiveBFrameEnabled: false
                          bFrames: 0
                          bitrate: 0
                          bufferWindow: string
                          crf: 0
                          entropyMode: string
                          frameRate: string
                          height: string
                          label: string
                          level: string
                          maxBitrate: 0
                          profile: string
                          referenceFrames: 0
                          slices: 0
                          width: string
                    rateControlMode: string
                    sceneChangeDetectionEnabled: false
                    stretchMode: string
                    syncMode: string
                  h265Video:
                    complexity: string
                    keyFrameInterval: string
                    label: string
                    layers:
                        - adaptiveBFrameEnabled: false
                          bFrames: 0
                          bitrate: 0
                          bufferWindow: string
                          crf: 0
                          frameRate: string
                          height: string
                          label: string
                          level: string
                          maxBitrate: 0
                          profile: string
                          referenceFrames: 0
                          slices: 0
                          width: string
                    sceneChangeDetectionEnabled: false
                    stretchMode: string
                    syncMode: string
                  jpgImage:
                    keyFrameInterval: string
                    label: string
                    layers:
                        - height: string
                          label: string
                          quality: 0
                          width: string
                    range: string
                    spriteColumn: 0
                    start: string
                    step: string
                    stretchMode: string
                    syncMode: string
                  pngImage:
                    keyFrameInterval: string
                    label: string
                    layers:
                        - height: string
                          label: string
                          width: string
                    range: string
                    start: string
                    step: string
                    stretchMode: string
                    syncMode: string
            experimentalOptions:
                string: string
            # Optional filter operations: crop, deinterlace, fade in/out,
            # overlays, rotation.
            filter:
                cropRectangle:
                    height: string
                    left: string
                    top: string
                    width: string
                deinterlace:
                    mode: string
                    parity: string
                fadeIn:
                    duration: string
                    fadeColor: string
                    start: string
                fadeOut:
                    duration: string
                    fadeColor: string
                    start: string
                overlays:
                    - audio:
                        audioGainLevel: 0
                        end: string
                        fadeInDuration: string
                        fadeOutDuration: string
                        inputLabel: string
                        start: string
                      video:
                        audioGainLevel: 0
                        cropRectangle:
                            height: string
                            left: string
                            top: string
                            width: string
                        end: string
                        fadeInDuration: string
                        fadeOutDuration: string
                        inputLabel: string
                        opacity: 0
                        position:
                            height: string
                            left: string
                            top: string
                            width: string
                        start: string
                rotation: string
            formats:
                - jpg:
                    filenamePattern: string
                  mp4:
                    filenamePattern: string
                    outputFiles:
                        - labels:
                            - string
                  png:
                    filenamePattern: string
                  transportStream:
                    filenamePattern: string
                    outputFiles:
                        - labels:
                            - string
          onErrorAction: string
          relativePriority: string
    resourceGroupName: string
Transform Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Transform resource accepts the following input properties:
- MediaServicesAccountName string - The Media Services account name. Changing this forces a new Transform to be created.
 - ResourceGroupName string - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 - Description string
 - An optional verbose description of the Transform.
 - Name string
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - Outputs List<TransformOutput> - One or more `output` blocks as defined below. At least one `output` must be defined.
- MediaServicesAccountName string - The Media Services account name. Changing this forces a new Transform to be created.
 - ResourceGroupName string - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 - Description string
 - An optional verbose description of the Transform.
 - Name string
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - Outputs []TransformOutputTypeArgs - One or more `output` blocks as defined below. At least one `output` must be defined.
- mediaServicesAccountName String - The Media Services account name. Changing this forces a new Transform to be created.
 - resourceGroupName String - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 - description String
 - An optional verbose description of the Transform.
 - name String
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - outputs List<TransformOutput> - One or more `output` blocks as defined below. At least one `output` must be defined.
- mediaServicesAccountName string - The Media Services account name. Changing this forces a new Transform to be created.
 - resourceGroupName string - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 - description string
 - An optional verbose description of the Transform.
 - name string
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - outputs TransformOutput[] - One or more `output` blocks as defined below. At least one `output` must be defined.
- media_services_account_name str - The Media Services account name. Changing this forces a new Transform to be created.
 - resource_group_name str - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 - description str
 - An optional verbose description of the Transform.
 - name str
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - outputs Sequence[TransformOutputArgs] - One or more `output` blocks as defined below. At least one `output` must be defined.
- mediaServicesAccountName String - The Media Services account name. Changing this forces a new Transform to be created.
 - resourceGroupName String - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 - description String
 - An optional verbose description of the Transform.
 - name String
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - outputs List<Property Map>
 - One or more `output` blocks as defined below. At least one `output` must be defined.
Outputs
All input properties are implicitly available as output properties. Additionally, the Transform resource produces the following output properties:
- Id string
 - The provider-assigned unique ID for this managed resource.
 
- Id string
 - The provider-assigned unique ID for this managed resource.
 
- id String
 - The provider-assigned unique ID for this managed resource.
 
- id string
 - The provider-assigned unique ID for this managed resource.
 
- id str
 - The provider-assigned unique ID for this managed resource.
 
- id String
 - The provider-assigned unique ID for this managed resource.
 
Look up Existing Transform Resource
Get an existing Transform resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: TransformState, opts?: CustomResourceOptions): Transform

@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        description: Optional[str] = None,
        media_services_account_name: Optional[str] = None,
        name: Optional[str] = None,
        outputs: Optional[Sequence[TransformOutputArgs]] = None,
        resource_group_name: Optional[str] = None) -> Transform
func GetTransform(ctx *Context, name string, id IDInput, state *TransformState, opts ...ResourceOption) (*Transform, error)
public static Transform Get(string name, Input<string> id, TransformState? state, CustomResourceOptions? opts = null)
public static Transform get(String name, Output<String> id, TransformState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 - state
 - Any extra arguments used during the lookup.
 - opts
 - A bag of options that control this resource's behavior.
 
- resource_name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 
- name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 - state
 - Any extra arguments used during the lookup.
 - opts
 - A bag of options that control this resource's behavior.
 
- name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 - state
 - Any extra arguments used during the lookup.
 - opts
 - A bag of options that control this resource's behavior.
 
- name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 - state
 - Any extra arguments used during the lookup.
 - opts
 - A bag of options that control this resource's behavior.
 
- Description string
 - An optional verbose description of the Transform.
 - MediaServicesAccountName string
 - The Media Services account name. Changing this forces a new Transform to be created.
 - Name string
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - Outputs List<TransformOutput>
 - One or more `output` blocks as defined below. At least one `output` must be defined.
 - ResourceGroupName string
 - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 
- Description string
 - An optional verbose description of the Transform.
 - Media
Services stringAccount Name  - The Media Services account name. Changing this forces a new Transform to be created.
 - Name string
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - Outputs
[]Transform
Output Type Args  - One or more 
outputblocks as defined below. At least oneoutputmust be defined. - Resource
Group stringName  - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 
- description String
 - An optional verbose description of the Transform.
 - media
Services StringAccount Name  - The Media Services account name. Changing this forces a new Transform to be created.
 - name String
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - outputs
List<Transform
Output>  - One or more 
outputblocks as defined below. At least oneoutputmust be defined. - resource
Group StringName  - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 
- description string
 - An optional verbose description of the Transform.
 - media
Services stringAccount Name  - The Media Services account name. Changing this forces a new Transform to be created.
 - name string
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - outputs
Transform
Output[]  - One or more 
outputblocks as defined below. At least oneoutputmust be defined. - resource
Group stringName  - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 
- description str
 - An optional verbose description of the Transform.
 - media_services_account_name str
 - The Media Services account name. Changing this forces a new Transform to be created.
 - name str
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - outputs Sequence[TransformOutputArgs]
 - One or more `output` blocks as defined below. At least one `output` must be defined.
 - resource_group_name str
 - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 
- description String
 - An optional verbose description of the Transform.
 - media
Services StringAccount Name  - The Media Services account name. Changing this forces a new Transform to be created.
 - name String
 - The name which should be used for this Transform. Changing this forces a new Transform to be created.
 - outputs List<Property Map>
 - One or more 
outputblocks as defined below. At least oneoutputmust be defined. - resource
Group StringName  - The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
 
Supporting Types
TransformOutput, TransformOutputArgs    
- AudioAnalyzerPreset TransformOutputAudioAnalyzerPreset
 - An `audio_analyzer_preset` block as defined above.
 - BuiltinPreset TransformOutputBuiltinPreset
 - A `builtin_preset` block as defined above.
 - CustomPreset TransformOutputCustomPreset
 - A `custom_preset` block as defined above.
 - FaceDetectorPreset TransformOutputFaceDetectorPreset
 - A `face_detector_preset` block as defined above.
 - OnErrorAction string
 - A Transform can define more than one output. This property defines what the service should do when one output fails - either continue to produce other outputs, or stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with `ContinueJob`. Possible values are `StopProcessingJob` or `ContinueJob`. Defaults to `StopProcessingJob`.
 - RelativePriority string
 - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are `High`, `Normal` or `Low`. Defaults to `Normal`.
 - VideoAnalyzerPreset TransformOutputVideoAnalyzerPreset
 - A `video_analyzer_preset` block as defined below. NOTE: Each output can only have one type of preset: `builtin_preset`, `audio_analyzer_preset`, `custom_preset`, `face_detector_preset` or `video_analyzer_preset`. If you need to apply different presets you must create one output for each one.
- Audio
Analyzer TransformPreset Output Audio Analyzer Preset  - An 
audio_analyzer_presetblock as defined above. - Builtin
Preset TransformOutput Builtin Preset  - A 
builtin_presetblock as defined above. - Custom
Preset TransformOutput Custom Preset  - A 
custom_presetblock as defined above. - Face
Detector TransformPreset Output Face Detector Preset  - A 
face_detector_presetblock as defined above. - On
Error stringAction  - A Transform can define more than one outputs. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with 
ContinueJob. Possible values areStopProcessingJoborContinueJob. Defaults toStopProcessingJob. - Relative
Priority string - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are 
High,NormalorLow. Defaults toNormal. - Video
Analyzer TransformPreset Output Video Analyzer Preset  A
video_analyzer_presetblock as defined below.NOTE: Each output can only have one type of preset:
builtin_preset,audio_analyzer_preset,custom_preset,face_detector_presetorvideo_analyzer_preset. If you need to apply different presets you must create one output for each one.
- audio
Analyzer TransformPreset Output Audio Analyzer Preset  - An 
audio_analyzer_presetblock as defined above. - builtin
Preset TransformOutput Builtin Preset  - A 
builtin_presetblock as defined above. - custom
Preset TransformOutput Custom Preset  - A 
custom_presetblock as defined above. - face
Detector TransformPreset Output Face Detector Preset  - A 
face_detector_presetblock as defined above. - on
Error StringAction  - A Transform can define more than one outputs. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with 
ContinueJob. Possible values areStopProcessingJoborContinueJob. Defaults toStopProcessingJob. - relative
Priority String - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are 
High,NormalorLow. Defaults toNormal. - video
Analyzer TransformPreset Output Video Analyzer Preset  A
video_analyzer_presetblock as defined below.NOTE: Each output can only have one type of preset:
builtin_preset,audio_analyzer_preset,custom_preset,face_detector_presetorvideo_analyzer_preset. If you need to apply different presets you must create one output for each one.
- audio
Analyzer TransformPreset Output Audio Analyzer Preset  - An 
audio_analyzer_presetblock as defined above. - builtin
Preset TransformOutput Builtin Preset  - A 
builtin_presetblock as defined above. - custom
Preset TransformOutput Custom Preset  - A 
custom_presetblock as defined above. - face
Detector TransformPreset Output Face Detector Preset  - A 
face_detector_presetblock as defined above. - on
Error stringAction  - A Transform can define more than one outputs. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with 
ContinueJob. Possible values areStopProcessingJoborContinueJob. Defaults toStopProcessingJob. - relative
Priority string - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are 
High,NormalorLow. Defaults toNormal. - video
Analyzer TransformPreset Output Video Analyzer Preset  A
video_analyzer_presetblock as defined below.NOTE: Each output can only have one type of preset:
builtin_preset,audio_analyzer_preset,custom_preset,face_detector_presetorvideo_analyzer_preset. If you need to apply different presets you must create one output for each one.
- audio_
analyzer_ Transformpreset Output Audio Analyzer Preset  - An 
audio_analyzer_presetblock as defined above. - builtin_
preset TransformOutput Builtin Preset  - A 
builtin_presetblock as defined above. - custom_
preset TransformOutput Custom Preset  - A 
custom_presetblock as defined above. - face_
detector_ Transformpreset Output Face Detector Preset  - A 
face_detector_presetblock as defined above. - on_
error_ straction  - A Transform can define more than one outputs. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with 
ContinueJob. Possible values areStopProcessingJoborContinueJob. Defaults toStopProcessingJob. - relative_
priority str - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are 
High,NormalorLow. Defaults toNormal. - video_
analyzer_ Transformpreset Output Video Analyzer Preset  A
video_analyzer_presetblock as defined below.NOTE: Each output can only have one type of preset:
builtin_preset,audio_analyzer_preset,custom_preset,face_detector_presetorvideo_analyzer_preset. If you need to apply different presets you must create one output for each one.
- audio
Analyzer Property MapPreset  - An 
audio_analyzer_presetblock as defined above. - builtin
Preset Property Map - A 
builtin_presetblock as defined above. - custom
Preset Property Map - A 
custom_presetblock as defined above. - face
Detector Property MapPreset  - A 
face_detector_presetblock as defined above. - on
Error StringAction  - A Transform can define more than one outputs. This property defines what the service should do when one output fails - either continue to produce other outputs, or, stop the other outputs. The overall Job state will not reflect failures of outputs that are specified with 
ContinueJob. Possible values areStopProcessingJoborContinueJob. Defaults toStopProcessingJob. - relative
Priority String - Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing Transform Outputs. Possible values are 
High,NormalorLow. Defaults toNormal. - video
Analyzer Property MapPreset  A
video_analyzer_presetblock as defined below.NOTE: Each output can only have one type of preset:
builtin_preset,audio_analyzer_preset,custom_preset,face_detector_presetorvideo_analyzer_preset. If you need to apply different presets you must create one output for each one.
TransformOutputAudioAnalyzerPreset, TransformOutputAudioAnalyzerPresetArgs          
- AudioAnalysisMode string
 - Possible values are `Basic` or `Standard`. Determines the set of audio analysis operations to be performed. Defaults to `Standard`.
 - AudioLanguage string
 - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to `en-US`. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463.
 - ExperimentalOptions Dictionary<string, string>
 - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 
- Audio
Analysis stringMode  - Possible values are 
BasicorStandard. Determines the set of audio analysis operations to be performed. Default toStandard. - Audio
Language string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - Experimental
Options map[string]string - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 
- audio
Analysis StringMode  - Possible values are 
BasicorStandard. Determines the set of audio analysis operations to be performed. Default toStandard. - audio
Language String - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - experimental
Options Map<String,String> - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 
- audio
Analysis stringMode  - Possible values are 
BasicorStandard. Determines the set of audio analysis operations to be performed. Default toStandard. - audio
Language string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - experimental
Options {[key: string]: string} - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 
- audio_
analysis_ strmode  - Possible values are 
BasicorStandard. Determines the set of audio analysis operations to be performed. Default toStandard. - audio_
language str - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - experimental_
options Mapping[str, str] - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 
- audio
Analysis StringMode  - Possible values are 
BasicorStandard. Determines the set of audio analysis operations to be performed. Default toStandard. - audio
Language String - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - experimental
Options Map<String> - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 
TransformOutputBuiltinPreset, TransformOutputBuiltinPresetArgs        
- PresetName string
 - The built-in preset to be used for encoding videos. The possible values are `AACGoodQualityAudio`, `AdaptiveStreaming`, `ContentAwareEncoding`, `ContentAwareEncodingExperimental`, `CopyAllBitrateNonInterleaved`, `DDGoodQualityAudio`, `H265AdaptiveStreaming`, `H265ContentAwareEncoding`, `H265SingleBitrate4K`, `H265SingleBitrate1080p`, `H265SingleBitrate720p`, `H264MultipleBitrate1080p`, `H264MultipleBitrateSD`, `H264MultipleBitrate720p`, `H264SingleBitrate1080p`, `H264SingleBitrateSD` and `H264SingleBitrate720p`.
 - PresetConfiguration TransformOutputBuiltinPresetPresetConfiguration
 - A `preset_configuration` block as defined below.
- Preset
Name string - The built-in preset to be used for encoding videos. The Possible values are 
AACGoodQualityAudio,AdaptiveStreaming,ContentAwareEncoding,ContentAwareEncodingExperimental,CopyAllBitrateNonInterleaved,DDGoodQualityAudio,H265AdaptiveStreaming,H265ContentAwareEncoding,H265SingleBitrate4K,H265SingleBitrate1080p,H265SingleBitrate720p,H264MultipleBitrate1080p,H264MultipleBitrateSD,H264MultipleBitrate720p,H264SingleBitrate1080p,H264SingleBitrateSDandH264SingleBitrate720p. - Preset
Configuration TransformOutput Builtin Preset Preset Configuration  - A 
preset_configurationblock as defined below. 
- preset
Name String - The built-in preset to be used for encoding videos. The Possible values are 
AACGoodQualityAudio,AdaptiveStreaming,ContentAwareEncoding,ContentAwareEncodingExperimental,CopyAllBitrateNonInterleaved,DDGoodQualityAudio,H265AdaptiveStreaming,H265ContentAwareEncoding,H265SingleBitrate4K,H265SingleBitrate1080p,H265SingleBitrate720p,H264MultipleBitrate1080p,H264MultipleBitrateSD,H264MultipleBitrate720p,H264SingleBitrate1080p,H264SingleBitrateSDandH264SingleBitrate720p. - preset
Configuration TransformOutput Builtin Preset Preset Configuration  - A 
preset_configurationblock as defined below. 
- preset
Name string - The built-in preset to be used for encoding videos. The Possible values are 
AACGoodQualityAudio,AdaptiveStreaming,ContentAwareEncoding,ContentAwareEncodingExperimental,CopyAllBitrateNonInterleaved,DDGoodQualityAudio,H265AdaptiveStreaming,H265ContentAwareEncoding,H265SingleBitrate4K,H265SingleBitrate1080p,H265SingleBitrate720p,H264MultipleBitrate1080p,H264MultipleBitrateSD,H264MultipleBitrate720p,H264SingleBitrate1080p,H264SingleBitrateSDandH264SingleBitrate720p. - preset
Configuration TransformOutput Builtin Preset Preset Configuration  - A 
preset_configurationblock as defined below. 
- preset_
name str - The built-in preset to be used for encoding videos. The Possible values are 
AACGoodQualityAudio,AdaptiveStreaming,ContentAwareEncoding,ContentAwareEncodingExperimental,CopyAllBitrateNonInterleaved,DDGoodQualityAudio,H265AdaptiveStreaming,H265ContentAwareEncoding,H265SingleBitrate4K,H265SingleBitrate1080p,H265SingleBitrate720p,H264MultipleBitrate1080p,H264MultipleBitrateSD,H264MultipleBitrate720p,H264SingleBitrate1080p,H264SingleBitrateSDandH264SingleBitrate720p. - preset_
configuration TransformOutput Builtin Preset Preset Configuration  - A 
preset_configurationblock as defined below. 
- preset
Name String - The built-in preset to be used for encoding videos. The Possible values are 
AACGoodQualityAudio,AdaptiveStreaming,ContentAwareEncoding,ContentAwareEncodingExperimental,CopyAllBitrateNonInterleaved,DDGoodQualityAudio,H265AdaptiveStreaming,H265ContentAwareEncoding,H265SingleBitrate4K,H265SingleBitrate1080p,H265SingleBitrate720p,H264MultipleBitrate1080p,H264MultipleBitrateSD,H264MultipleBitrate720p,H264SingleBitrate1080p,H264SingleBitrateSDandH264SingleBitrate720p. - preset
Configuration Property Map - A 
preset_configurationblock as defined below. 
TransformOutputBuiltinPresetPresetConfiguration, TransformOutputBuiltinPresetPresetConfigurationArgs            
- Complexity string
 - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`.
 - InterleaveOutput string
 - Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are `InterleavedOutput` and `NonInterleavedOutput`.
 - KeyFrameIntervalInSeconds double
 - The key frame interval in seconds. Possible value is a positive float. For example, set as `2.0` to reduce the playback buffering for some players.
 - MaxBitrateBps int
 - The maximum bitrate in bits per second (threshold for the top video layer). For example, set as `6000000` to avoid producing very high bitrate outputs for contents with high complexity.
 - MaxHeight int
 - The maximum height of output video layers. For example, set as `720` to produce output layers up to 720P even if the input is 4K.
 - MaxLayers int
 - The maximum number of output video layers. For example, set as `4` to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
 - MinBitrateBps int
 - The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as `200000` to have a bottom layer that covers users with low network bandwidth.
 - MinHeight int
 - The minimum height of output video layers. For example, set as `360` to avoid output layers of smaller resolutions like 180P.
- Complexity string
 - The complexity of the encoding. Possible values are 
Balanced,SpeedorQuality. - Interleave
Output string - Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are 
InterleavedOutputandNonInterleavedOutput. - Key
Frame float64Interval In Seconds  - The key frame interval in seconds. Possible value is a positive float. For example, set as 
2.0to reduce the playback buffering for some players. - Max
Bitrate intBps  - The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 
6000000to avoid producing very high bitrate outputs for contents with high complexity. - Max
Height int - The maximum height of output video layers. For example, set as 
720to produce output layers up to 720P even if the input is 4K. - Max
Layers int - The maximum number of output video layers. For example, set as 
4to make sure at most 4 output layers are produced to control the overall cost of the encoding job. - Min
Bitrate intBps  - The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 
200000to have a bottom layer that covers users with low network bandwidth. - Min
Height int - The minimum height of output video layers. For example, set as 
360to avoid output layers of smaller resolutions like 180P. 
- complexity String
 - The complexity of the encoding. Possible values are 
Balanced,SpeedorQuality. - interleave
Output String - Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are 
InterleavedOutputandNonInterleavedOutput. - key
Frame DoubleInterval In Seconds  - The key frame interval in seconds. Possible value is a positive float. For example, set as 
2.0to reduce the playback buffering for some players. - max
Bitrate IntegerBps  - The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 
6000000to avoid producing very high bitrate outputs for contents with high complexity. - max
Height Integer - The maximum height of output video layers. For example, set as 
720to produce output layers up to 720P even if the input is 4K. - max
Layers Integer - The maximum number of output video layers. For example, set as 
4to make sure at most 4 output layers are produced to control the overall cost of the encoding job. - min
Bitrate IntegerBps  - The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 
200000to have a bottom layer that covers users with low network bandwidth. - min
Height Integer - The minimum height of output video layers. For example, set as 
360to avoid output layers of smaller resolutions like 180P. 
- complexity string
 - The complexity of the encoding. Possible values are 
Balanced,SpeedorQuality. - interleave
Output string - Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are 
InterleavedOutputandNonInterleavedOutput. - key
Frame numberInterval In Seconds  - The key frame interval in seconds. Possible value is a positive float. For example, set as 
2.0to reduce the playback buffering for some players. - max
Bitrate numberBps  - The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 
6000000to avoid producing very high bitrate outputs for contents with high complexity. - max
Height number - The maximum height of output video layers. For example, set as 
720to produce output layers up to 720P even if the input is 4K. - max
Layers number - The maximum number of output video layers. For example, set as 
4to make sure at most 4 output layers are produced to control the overall cost of the encoding job. - min
Bitrate numberBps  - The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 
200000to have a bottom layer that covers users with low network bandwidth. - min
Height number - The minimum height of output video layers. For example, set as 
360to avoid output layers of smaller resolutions like 180P. 
- complexity str
 - The complexity of the encoding. Possible values are 
Balanced,SpeedorQuality. - interleave_
output str - Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are 
InterleavedOutputandNonInterleavedOutput. - key_
frame_ floatinterval_ in_ seconds  - The key frame interval in seconds. Possible value is a positive float. For example, set as 
2.0to reduce the playback buffering for some players. - max_
bitrate_ intbps  - The maximum bitrate in bits per second (threshold for the top video layer). For example, set as 
6000000to avoid producing very high bitrate outputs for contents with high complexity. - max_
height int - The maximum height of output video layers. For example, set as 
720to produce output layers up to 720P even if the input is 4K. - max_
layers int - The maximum number of output video layers. For example, set as 
4to make sure at most 4 output layers are produced to control the overall cost of the encoding job. - min_
bitrate_ intbps  - The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as 
200000to have a bottom layer that covers users with low network bandwidth. - min_
height int - The minimum height of output video layers. For example, set as 
360to avoid output layers of smaller resolutions like 180P. 
- `complexity` String - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`.
- `interleaveOutput` String - Specifies the interleave mode of the output to control how audio are stored in the container format. Possible values are `InterleavedOutput` and `NonInterleavedOutput`.
- `keyFrameIntervalInSeconds` Number - The key frame interval in seconds. Possible value is a positive float. For example, set as `2.0` to reduce the playback buffering for some players.
- `maxBitrateBps` Number - The maximum bitrate in bits per second (threshold for the top video layer). For example, set as `6000000` to avoid producing very high bitrate outputs for contents with high complexity.
- `maxHeight` Number - The maximum height of output video layers. For example, set as `720` to produce output layers up to 720P even if the input is 4K.
- `maxLayers` Number - The maximum number of output video layers. For example, set as `4` to make sure at most 4 output layers are produced to control the overall cost of the encoding job.
- `minBitrateBps` Number - The minimum bitrate in bits per second (threshold for the bottom video layer). For example, set as `200000` to have a bottom layer that covers users with low network bandwidth.
- `minHeight` Number - The minimum height of output video layers. For example, set as `360` to avoid output layers of smaller resolutions like 180P.
TransformOutputCustomPreset, TransformOutputCustomPresetArgs        
- `Codecs` List&lt;TransformOutputCustomPresetCodec&gt; - One or more `codec` blocks as defined above.
- `Formats` List&lt;TransformOutputCustomPresetFormat&gt; - One or more `format` blocks as defined below.
- `ExperimentalOptions` Dictionary&lt;string, string&gt; - Dictionary containing key value pairs for parameters not exposed in the preset itself.
- `Filter` TransformOutputCustomPresetFilter - A `filter` block as defined below.
- `Codecs` []TransformOutputCustomPresetCodec - One or more `codec` blocks as defined above.
- `Formats` []TransformOutputCustomPresetFormat - One or more `format` blocks as defined below.
- `ExperimentalOptions` map[string]string - Dictionary containing key value pairs for parameters not exposed in the preset itself.
- `Filter` TransformOutputCustomPresetFilter - A `filter` block as defined below.
- `codecs` List&lt;TransformOutputCustomPresetCodec&gt; - One or more `codec` blocks as defined above.
- `formats` List&lt;TransformOutputCustomPresetFormat&gt; - One or more `format` blocks as defined below.
- `experimentalOptions` Map&lt;String,String&gt; - Dictionary containing key value pairs for parameters not exposed in the preset itself.
- `filter` TransformOutputCustomPresetFilter - A `filter` block as defined below.
- `codecs` TransformOutputCustomPresetCodec[] - One or more `codec` blocks as defined above.
- `formats` TransformOutputCustomPresetFormat[] - One or more `format` blocks as defined below.
- `experimentalOptions` {[key: string]: string} - Dictionary containing key value pairs for parameters not exposed in the preset itself.
- `filter` TransformOutputCustomPresetFilter - A `filter` block as defined below.
- `codecs` Sequence[TransformOutputCustomPresetCodec] - One or more `codec` blocks as defined above.
- `formats` Sequence[TransformOutputCustomPresetFormat] - One or more `format` blocks as defined below.
- `experimental_options` Mapping[str, str] - Dictionary containing key value pairs for parameters not exposed in the preset itself.
- `filter` TransformOutputCustomPresetFilter - A `filter` block as defined below.
- `codecs` List&lt;Property Map&gt; - One or more `codec` blocks as defined above.
- `formats` List&lt;Property Map&gt; - One or more `format` blocks as defined below.
- `experimentalOptions` Map&lt;String&gt; - Dictionary containing key value pairs for parameters not exposed in the preset itself.
- `filter` Property Map - A `filter` block as defined below.
TransformOutputCustomPresetCodec, TransformOutputCustomPresetCodecArgs          
- `AacAudio` TransformOutputCustomPresetCodecAacAudio - A `aac_audio` block as defined above.
- `CopyAudio` TransformOutputCustomPresetCodecCopyAudio - A `copy_audio` block as defined below.
- `CopyVideo` TransformOutputCustomPresetCodecCopyVideo - A `copy_video` block as defined below.
- `DdAudio` TransformOutputCustomPresetCodecDdAudio - A `dd_audio` block as defined below.
- `H264Video` TransformOutputCustomPresetCodecH264Video - A `h264_video` block as defined below.
- `H265Video` TransformOutputCustomPresetCodecH265Video - A `h265_video` block as defined below.
- `JpgImage` TransformOutputCustomPresetCodecJpgImage - A `jpg_image` block as defined below.
- `PngImage` TransformOutputCustomPresetCodecPngImage - A `png_image` block as defined below. NOTE: Each codec can only have one type: `aac_audio`, `copy_audio`, `copy_video`, `dd_audio`, `h264_video`, `h265_video`, `jpg_image` or `png_image`. If you need to apply different codec you must create one codec for each one.
- `AacAudio` TransformOutputCustomPresetCodecAacAudio - A `aac_audio` block as defined above.
- `CopyAudio` TransformOutputCustomPresetCodecCopyAudio - A `copy_audio` block as defined below.
- `CopyVideo` TransformOutputCustomPresetCodecCopyVideo - A `copy_video` block as defined below.
- `DdAudio` TransformOutputCustomPresetCodecDdAudio - A `dd_audio` block as defined below.
- `H264Video` TransformOutputCustomPresetCodecH264Video - A `h264_video` block as defined below.
- `H265Video` TransformOutputCustomPresetCodecH265Video - A `h265_video` block as defined below.
- `JpgImage` TransformOutputCustomPresetCodecJpgImage - A `jpg_image` block as defined below.
- `PngImage` TransformOutputCustomPresetCodecPngImage - A `png_image` block as defined below. NOTE: Each codec can only have one type: `aac_audio`, `copy_audio`, `copy_video`, `dd_audio`, `h264_video`, `h265_video`, `jpg_image` or `png_image`. If you need to apply different codec you must create one codec for each one.
- `aacAudio` TransformOutputCustomPresetCodecAacAudio - A `aac_audio` block as defined above.
- `copyAudio` TransformOutputCustomPresetCodecCopyAudio - A `copy_audio` block as defined below.
- `copyVideo` TransformOutputCustomPresetCodecCopyVideo - A `copy_video` block as defined below.
- `ddAudio` TransformOutputCustomPresetCodecDdAudio - A `dd_audio` block as defined below.
- `h264Video` TransformOutputCustomPresetCodecH264Video - A `h264_video` block as defined below.
- `h265Video` TransformOutputCustomPresetCodecH265Video - A `h265_video` block as defined below.
- `jpgImage` TransformOutputCustomPresetCodecJpgImage - A `jpg_image` block as defined below.
- `pngImage` TransformOutputCustomPresetCodecPngImage - A `png_image` block as defined below. NOTE: Each codec can only have one type: `aac_audio`, `copy_audio`, `copy_video`, `dd_audio`, `h264_video`, `h265_video`, `jpg_image` or `png_image`. If you need to apply different codec you must create one codec for each one.
- `aacAudio` TransformOutputCustomPresetCodecAacAudio - A `aac_audio` block as defined above.
- `copyAudio` TransformOutputCustomPresetCodecCopyAudio - A `copy_audio` block as defined below.
- `copyVideo` TransformOutputCustomPresetCodecCopyVideo - A `copy_video` block as defined below.
- `ddAudio` TransformOutputCustomPresetCodecDdAudio - A `dd_audio` block as defined below.
- `h264Video` TransformOutputCustomPresetCodecH264Video - A `h264_video` block as defined below.
- `h265Video` TransformOutputCustomPresetCodecH265Video - A `h265_video` block as defined below.
- `jpgImage` TransformOutputCustomPresetCodecJpgImage - A `jpg_image` block as defined below.
- `pngImage` TransformOutputCustomPresetCodecPngImage - A `png_image` block as defined below. NOTE: Each codec can only have one type: `aac_audio`, `copy_audio`, `copy_video`, `dd_audio`, `h264_video`, `h265_video`, `jpg_image` or `png_image`. If you need to apply different codec you must create one codec for each one.
- `aac_audio` TransformOutputCustomPresetCodecAacAudio - A `aac_audio` block as defined above.
- `copy_audio` TransformOutputCustomPresetCodecCopyAudio - A `copy_audio` block as defined below.
- `copy_video` TransformOutputCustomPresetCodecCopyVideo - A `copy_video` block as defined below.
- `dd_audio` TransformOutputCustomPresetCodecDdAudio - A `dd_audio` block as defined below.
- `h264_video` TransformOutputCustomPresetCodecH264Video - A `h264_video` block as defined below.
- `h265_video` TransformOutputCustomPresetCodecH265Video - A `h265_video` block as defined below.
- `jpg_image` TransformOutputCustomPresetCodecJpgImage - A `jpg_image` block as defined below.
- `png_image` TransformOutputCustomPresetCodecPngImage - A `png_image` block as defined below. NOTE: Each codec can only have one type: `aac_audio`, `copy_audio`, `copy_video`, `dd_audio`, `h264_video`, `h265_video`, `jpg_image` or `png_image`. If you need to apply different codec you must create one codec for each one.
- `aacAudio` Property Map - A `aac_audio` block as defined above.
- `copyAudio` Property Map - A `copy_audio` block as defined below.
- `copyVideo` Property Map - A `copy_video` block as defined below.
- `ddAudio` Property Map - A `dd_audio` block as defined below.
- `h264Video` Property Map - A `h264_video` block as defined below.
- `h265Video` Property Map - A `h265_video` block as defined below.
- `jpgImage` Property Map - A `jpg_image` block as defined below.
- `pngImage` Property Map - A `png_image` block as defined below. NOTE: Each codec can only have one type: `aac_audio`, `copy_audio`, `copy_video`, `dd_audio`, `h264_video`, `h265_video`, `jpg_image` or `png_image`. If you need to apply different codec you must create one codec for each one.
TransformOutputCustomPresetCodecAacAudio, TransformOutputCustomPresetCodecAacAudioArgs              
- `Bitrate` int - The bitrate of the audio in bits per second. Default to `128000`.
- `Channels` int - The number of audio channels. Default to `2`.
- `Label` string - Specifies the label for the codec. The label can be used to control muxing behavior.
- `Profile` string - The encoding profile to be used when encoding audio with AAC. Possible values are `AacLc`, `HeAacV1`, and `HeAacV2`. Default to `AacLc`.
- `SamplingRate` int - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `Bitrate` int - The bitrate of the audio in bits per second. Default to `128000`.
- `Channels` int - The number of audio channels. Default to `2`.
- `Label` string - Specifies the label for the codec. The label can be used to control muxing behavior.
- `Profile` string - The encoding profile to be used when encoding audio with AAC. Possible values are `AacLc`, `HeAacV1`, and `HeAacV2`. Default to `AacLc`.
- `SamplingRate` int - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `bitrate` Integer - The bitrate of the audio in bits per second. Default to `128000`.
- `channels` Integer - The number of audio channels. Default to `2`.
- `label` String - Specifies the label for the codec. The label can be used to control muxing behavior.
- `profile` String - The encoding profile to be used when encoding audio with AAC. Possible values are `AacLc`, `HeAacV1`, and `HeAacV2`. Default to `AacLc`.
- `samplingRate` Integer - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `bitrate` number - The bitrate of the audio in bits per second. Default to `128000`.
- `channels` number - The number of audio channels. Default to `2`.
- `label` string - Specifies the label for the codec. The label can be used to control muxing behavior.
- `profile` string - The encoding profile to be used when encoding audio with AAC. Possible values are `AacLc`, `HeAacV1`, and `HeAacV2`. Default to `AacLc`.
- `samplingRate` number - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `bitrate` int - The bitrate of the audio in bits per second. Default to `128000`.
- `channels` int - The number of audio channels. Default to `2`.
- `label` str - Specifies the label for the codec. The label can be used to control muxing behavior.
- `profile` str - The encoding profile to be used when encoding audio with AAC. Possible values are `AacLc`, `HeAacV1`, and `HeAacV2`. Default to `AacLc`.
- `sampling_rate` int - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `bitrate` Number - The bitrate of the audio in bits per second. Default to `128000`.
- `channels` Number - The number of audio channels. Default to `2`.
- `label` String - Specifies the label for the codec. The label can be used to control muxing behavior.
- `profile` String - The encoding profile to be used when encoding audio with AAC. Possible values are `AacLc`, `HeAacV1`, and `HeAacV2`. Default to `AacLc`.
- `samplingRate` Number - The sampling rate to use for encoding in Hertz. Default to `48000`.
TransformOutputCustomPresetCodecCopyAudio, TransformOutputCustomPresetCodecCopyAudioArgs              
- Label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- Label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- label String
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- label str
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- label String
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
TransformOutputCustomPresetCodecCopyVideo, TransformOutputCustomPresetCodecCopyVideoArgs              
- Label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- Label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- label String
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- label str
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
- label String
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 
TransformOutputCustomPresetCodecDdAudio, TransformOutputCustomPresetCodecDdAudioArgs              
- `Bitrate` int - The bitrate of the audio in bits per second. Default to `192000`.
- `Channels` int - The number of audio channels. Default to `2`.
- `Label` string - Specifies the label for the codec. The label can be used to control muxing behavior.
- `SamplingRate` int - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `Bitrate` int - The bitrate of the audio in bits per second. Default to `192000`.
- `Channels` int - The number of audio channels. Default to `2`.
- `Label` string - Specifies the label for the codec. The label can be used to control muxing behavior.
- `SamplingRate` int - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `bitrate` Integer - The bitrate of the audio in bits per second. Default to `192000`.
- `channels` Integer - The number of audio channels. Default to `2`.
- `label` String - Specifies the label for the codec. The label can be used to control muxing behavior.
- `samplingRate` Integer - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `bitrate` number - The bitrate of the audio in bits per second. Default to `192000`.
- `channels` number - The number of audio channels. Default to `2`.
- `label` string - Specifies the label for the codec. The label can be used to control muxing behavior.
- `samplingRate` number - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `bitrate` int - The bitrate of the audio in bits per second. Default to `192000`.
- `channels` int - The number of audio channels. Default to `2`.
- `label` str - Specifies the label for the codec. The label can be used to control muxing behavior.
- `sampling_rate` int - The sampling rate to use for encoding in Hertz. Default to `48000`.
- `bitrate` Number - The bitrate of the audio in bits per second. Default to `192000`.
- `channels` Number - The number of audio channels. Default to `2`.
- `label` String - Specifies the label for the codec. The label can be used to control muxing behavior.
- `samplingRate` Number - The sampling rate to use for encoding in Hertz. Default to `48000`.
TransformOutputCustomPresetCodecH264Video, TransformOutputCustomPresetCodecH264VideoArgs            
- `Complexity` string - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Default to `Balanced`.
- `KeyFrameInterval` string - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- `Label` string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `Layers` List&lt;TransformOutputCustomPresetCodecH264VideoLayer&gt; - One or more `layer` blocks as defined below.
- `RateControlMode` string - The rate control mode. Possible values are `ABR`, `CBR` or `CRF`. Default to `ABR`.
- `SceneChangeDetectionEnabled` bool - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to `false`.
- `StretchMode` string - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Default to `AutoSize`.
- `SyncMode` string - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Default to `Auto`.
- `Complexity` string - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Default to `Balanced`.
- `KeyFrameInterval` string - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- `Label` string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `Layers` []TransformOutputCustomPresetCodecH264VideoLayer - One or more `layer` blocks as defined below.
- `RateControlMode` string - The rate control mode. Possible values are `ABR`, `CBR` or `CRF`. Default to `ABR`.
- `SceneChangeDetectionEnabled` bool - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to `false`.
- `StretchMode` string - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Default to `AutoSize`.
- `SyncMode` string - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Default to `Auto`.
- `complexity` String - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Default to `Balanced`.
- `keyFrameInterval` String - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- `label` String - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `layers` List&lt;TransformOutputCustomPresetCodecH264VideoLayer&gt; - One or more `layer` blocks as defined below.
- `rateControlMode` String - The rate control mode. Possible values are `ABR`, `CBR` or `CRF`. Default to `ABR`.
- `sceneChangeDetectionEnabled` Boolean - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to `false`.
- `stretchMode` String - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Default to `AutoSize`.
- `syncMode` String - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Default to `Auto`.
- `complexity` string - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Default to `Balanced`.
- `keyFrameInterval` string - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- `label` string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `layers` TransformOutputCustomPresetCodecH264VideoLayer[] - One or more `layer` blocks as defined below.
- `rateControlMode` string - The rate control mode. Possible values are `ABR`, `CBR` or `CRF`. Default to `ABR`.
- `sceneChangeDetectionEnabled` boolean - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to `false`.
- `stretchMode` string - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Default to `AutoSize`.
- `syncMode` string - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Default to `Auto`.
- `complexity` str - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Default to `Balanced`.
- `key_frame_interval` str - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- `label` str - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `layers` Sequence[TransformOutputCustomPresetCodecH264VideoLayer] - One or more `layer` blocks as defined below.
- `rate_control_mode` str - The rate control mode. Possible values are `ABR`, `CBR` or `CRF`. Default to `ABR`.
- `scene_change_detection_enabled` bool - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to `false`.
- `stretch_mode` str - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Default to `AutoSize`.
- `sync_mode` str - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Default to `Auto`.
- `complexity` String - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Default to `Balanced`.
- `keyFrameInterval` String - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- `label` String - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `layers` List&lt;Property Map&gt; - One or more `layer` blocks as defined below.
- `rateControlMode` String - The rate control mode. Possible values are `ABR`, `CBR` or `CRF`. Default to `ABR`.
- `sceneChangeDetectionEnabled` Boolean - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Default to `false`.
- `stretchMode` String - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Default to `AutoSize`.
- `syncMode` String - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Default to `Auto`.
TransformOutputCustomPresetCodecH264VideoLayer, TransformOutputCustomPresetCodecH264VideoLayerArgs              
- `Bitrate` int - The average bitrate in bits per second at which to encode the input video when generating this layer.
- `AdaptiveBFrameEnabled` bool - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to `true`.
- `BFrames` int - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- `BufferWindow` string - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- `Crf` double - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set `CRF`. The range of CRF value is between `0` and `51`, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to `28`.
- `EntropyMode` string - The entropy mode to be used for this layer. Possible values are `Cabac` or `Cavlc`. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
- `FrameRate` string - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (For example, `30000/1001`), or in the form of a number (For example, `30`, or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- `Height` string - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- `Label` string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `Level` string - The H.264 levels. Currently, the resource support Level up to `6.2`. The value can be `auto`, or a number that matches the H.264 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- `MaxBitrate` int - The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- `Profile` string - The H.264 profile. Possible values are `Auto`, `Main` and `Main10`. Default to `Auto`.
- `ReferenceFrames` int - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- `Slices` int - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that encoder will use a single slice for each frame.
- `Width` string - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- `Bitrate` int - The average bitrate in bits per second at which to encode the input video when generating this layer.
- `AdaptiveBFrameEnabled` bool - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to `true`.
- `BFrames` int - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- `BufferWindow` string - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- `Crf` float64 - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set `CRF`. The range of CRF value is between `0` and `51`, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to `28`.
- `EntropyMode` string - The entropy mode to be used for this layer. Possible values are `Cabac` or `Cavlc`. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
- `FrameRate` string - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (For example, `30000/1001`), or in the form of a number (For example, `30`, or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- `Height` string - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- `Label` string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `Level` string - The H.264 levels. Currently, the resource support Level up to `6.2`. The value can be `auto`, or a number that matches the H.264 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- `MaxBitrate` int - The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- `Profile` string - The H.264 profile. Possible values are `Auto`, `Main` and `Main10`. Default to `Auto`.
- `ReferenceFrames` int - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- `Slices` int - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that encoder will use a single slice for each frame.
- `Width` string - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- `bitrate` Integer - The average bitrate in bits per second at which to encode the input video when generating this layer.
- `adaptiveBFrameEnabled` Boolean - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to `true`.
- `bFrames` Integer - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- `bufferWindow` String - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- `crf` Double - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set `CRF`. The range of CRF value is between `0` and `51`, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to `28`.
- `entropyMode` String - The entropy mode to be used for this layer. Possible values are `Cabac` or `Cavlc`. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
- `frameRate` String - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (For example, `30000/1001`), or in the form of a number (For example, `30`, or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- `height` String - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- `label` String - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- `level` String - The H.264 levels. Currently, the resource support Level up to `6.2`. The value can be `auto`, or a number that matches the H.264 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- `maxBitrate` Integer - The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- `profile` String - The H.264 profile. Possible values are `Auto`, `Main` and `Main10`. Default to `Auto`.
- `referenceFrames` Integer - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- `slices` Integer - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that encoder will use a single slice for each frame.
- `width` String - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- bitrate number - The average bitrate in bits per second at which to encode the input video when generating this layer.
- adaptiveBFrameEnabled boolean - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Defaults to `true`.
- bFrames number - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- bufferWindow string - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format, in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- crf number - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set to `CRF`. The range of CRF value is between `0` and `51`, where lower values result in better quality at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to `28`.
- entropyMode string - The entropy mode to be used for this layer. Possible values are `Cabac` or `Cavlc`. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
- frameRate string - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (for example, `30000/1001`), or in the form of a number (for example, `30` or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- height string - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- label string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- level string - The H.264 levels. Currently, the resource supports Level up to `6.2`. The value can be `auto`, or a number that matches the H.264 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- maxBitrate number - The maximum bitrate (in bits per second) at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- profile string - The H.264 profile. Possible values are `Auto`, `Main` and `Main10`. Defaults to `Auto`.
- referenceFrames number - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- slices number - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that the encoder will use a single slice for each frame.
- width string - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- bitrate int - The average bitrate in bits per second at which to encode the input video when generating this layer.
- adaptive_b_frame_enabled bool - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Defaults to `true`.
- b_frames int - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- buffer_window str - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format, in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- crf float - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set to `CRF`. The range of CRF value is between `0` and `51`, where lower values result in better quality at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to `28`.
- entropy_mode str - The entropy mode to be used for this layer. Possible values are `Cabac` or `Cavlc`. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
- frame_rate str - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (for example, `30000/1001`), or in the form of a number (for example, `30` or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- height str - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- label str - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- level str - The H.264 levels. Currently, the resource supports Level up to `6.2`. The value can be `auto`, or a number that matches the H.264 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- max_bitrate int - The maximum bitrate (in bits per second) at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- profile str - The H.264 profile. Possible values are `Auto`, `Main` and `Main10`. Defaults to `Auto`.
- reference_frames int - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- slices int - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that the encoder will use a single slice for each frame.
- width str - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- bitrate Number - The average bitrate in bits per second at which to encode the input video when generating this layer.
- adaptiveBFrameEnabled Boolean - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Defaults to `true`.
- bFrames Number - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- bufferWindow String - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format, in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- crf Number - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set to `CRF`. The range of CRF value is between `0` and `51`, where lower values result in better quality at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to `28`.
- entropyMode String - The entropy mode to be used for this layer. Possible values are `Cabac` or `Cavlc`. If not specified, the encoder chooses the mode that is appropriate for the profile and level.
- frameRate String - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (for example, `30000/1001`), or in the form of a number (for example, `30` or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- height String - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- label String - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- level String - The H.264 levels. Currently, the resource supports Level up to `6.2`. The value can be `auto`, or a number that matches the H.264 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- maxBitrate Number - The maximum bitrate (in bits per second) at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- profile String - The H.264 profile. Possible values are `Auto`, `Main` and `Main10`. Defaults to `Auto`.
- referenceFrames Number - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- slices Number - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that the encoder will use a single slice for each frame.
- width String - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
TransformOutputCustomPresetCodecH265Video, TransformOutputCustomPresetCodecH265VideoArgs            
- Complexity string - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Defaults to `Balanced`.
- KeyFrameInterval string - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- Label string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- Layers List<TransformOutputCustomPresetCodecH265VideoLayer> - One or more `layer` blocks as defined below.
- SceneChangeDetectionEnabled bool - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Defaults to `false`.
- StretchMode string - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`.
- SyncMode string - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`.
- Complexity string - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Defaults to `Balanced`.
- KeyFrameInterval string - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- Label string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- Layers []TransformOutputCustomPresetCodecH265VideoLayer - One or more `layer` blocks as defined below.
- SceneChangeDetectionEnabled bool - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Defaults to `false`.
- StretchMode string - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`.
- SyncMode string - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`.
- complexity String - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Defaults to `Balanced`.
- keyFrameInterval String - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- label String - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- layers List<TransformOutputCustomPresetCodecH265VideoLayer> - One or more `layer` blocks as defined below.
- sceneChangeDetectionEnabled Boolean - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Defaults to `false`.
- stretchMode String - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`.
- syncMode String - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`.
- complexity string - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Defaults to `Balanced`.
- keyFrameInterval string - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- label string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- layers TransformOutputCustomPresetCodecH265VideoLayer[] - One or more `layer` blocks as defined below.
- sceneChangeDetectionEnabled boolean - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Defaults to `false`.
- stretchMode string - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`.
- syncMode string - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`.
- complexity str - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Defaults to `Balanced`.
- key_frame_interval str - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- label str - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- layers Sequence[TransformOutputCustomPresetCodecH265VideoLayer] - One or more `layer` blocks as defined below.
- scene_change_detection_enabled bool - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Defaults to `false`.
- stretch_mode str - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`.
- sync_mode str - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`.
- complexity String - The complexity of the encoding. Possible values are `Balanced`, `Speed` or `Quality`. Defaults to `Balanced`.
- keyFrameInterval String - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
- label String - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- layers List<Property Map> - One or more `layer` blocks as defined below.
- sceneChangeDetectionEnabled Boolean - Whether the encoder should insert key frames at scene changes. This flag should be set to true only when the encoder is being configured to produce a single output video. Defaults to `false`.
- stretchMode String - Specifies the resizing mode - how the input video will be resized to fit the desired output resolution(s). Possible values are `AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`.
- syncMode String - Specifies the synchronization mode for the video. Possible values are `Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`.
TransformOutputCustomPresetCodecH265VideoLayer, TransformOutputCustomPresetCodecH265VideoLayerArgs              
- Bitrate int - The average bitrate in bits per second at which to encode the input video when generating this layer.
- AdaptiveBFrameEnabled bool - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Defaults to `true`.
- BFrames int - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- BufferWindow string - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format, in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- Crf double - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set to `CRF`. The range of CRF value is between `0` and `51`, where lower values result in better quality at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to `28`.
- FrameRate string - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (for example, `30000/1001`), or in the form of a number (for example, `30` or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- Height string - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- Label string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- Level string - The H.265 levels. Currently, the resource supports Level up to `6.2`. The value can be `auto`, or a number that matches the H.265 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- MaxBitrate int - The maximum bitrate (in bits per second) at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- Profile string - The H.265 profile. Possible values are `Auto`, `Main` and `Main10`. Defaults to `Auto`.
- ReferenceFrames int - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- Slices int - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that the encoder will use a single slice for each frame.
- Width string - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- Bitrate int - The average bitrate in bits per second at which to encode the input video when generating this layer.
- AdaptiveBFrameEnabled bool - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Defaults to `true`.
- BFrames int - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- BufferWindow string - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format, in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- Crf float64 - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set to `CRF`. The range of CRF value is between `0` and `51`, where lower values result in better quality at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to `28`.
- FrameRate string - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (for example, `30000/1001`), or in the form of a number (for example, `30` or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- Height string - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- Label string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- Level string - The H.265 levels. Currently, the resource supports Level up to `6.2`. The value can be `auto`, or a number that matches the H.265 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- MaxBitrate int - The maximum bitrate (in bits per second) at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- Profile string - The H.265 profile. Possible values are `Auto`, `Main` and `Main10`. Defaults to `Auto`.
- ReferenceFrames int - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- Slices int - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that the encoder will use a single slice for each frame.
- Width string - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- bitrate Integer - The average bitrate in bits per second at which to encode the input video when generating this layer.
- adaptiveBFrameEnabled Boolean - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Defaults to `true`.
- bFrames Integer - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- bufferWindow String - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format, in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- crf Double - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set to `CRF`. The range of CRF value is between `0` and `51`, where lower values result in better quality at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to `28`.
- frameRate String - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (for example, `30000/1001`), or in the form of a number (for example, `30` or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- height String - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- label String - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- level String - The H.265 levels. Currently, the resource supports Level up to `6.2`. The value can be `auto`, or a number that matches the H.265 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- maxBitrate Integer - The maximum bitrate (in bits per second) at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- profile String - The H.265 profile. Possible values are `Auto`, `Main` and `Main10`. Defaults to `Auto`.
- referenceFrames Integer - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- slices Integer - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that the encoder will use a single slice for each frame.
- width String - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- bitrate number - The average bitrate in bits per second at which to encode the input video when generating this layer.
- adaptiveBFrameEnabled boolean - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Defaults to `true`.
- bFrames number - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
- bufferWindow string - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format, in the range `0.1` to `100` seconds. Defaults to `PT5S`.
- crf number - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set to `CRF`. The range of CRF value is between `0` and `51`, where lower values result in better quality at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to `28`.
- frameRate string - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (for example, `30000/1001`), or in the form of a number (for example, `30` or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
- height string - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in height as the input.
- label string - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
- level string - The H.265 levels. Currently, the resource supports Level up to `6.2`. The value can be `auto`, or a number that matches the H.265 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
- maxBitrate number - The maximum bitrate (in bits per second) at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
- profile string - The H.265 profile. Possible values are `Auto`, `Main` and `Main10`. Defaults to `Auto`.
- referenceFrames number - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
- slices number - The number of slices to be used when encoding this layer. If not specified, default is `1`, which means that the encoder will use a single slice for each frame.
- width string - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example `50%` means the output video has half as many pixels in width as the input.
- bitrate int
 - The average bitrate in bits per second at which to encode the input video when generating this layer.
 - adaptive_
b_ boolframe_ enabled  - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Default to 
true. - b_
frames int - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
 - buffer_
window str - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 
0.1to100seconds. Defaults toPT5S. - crf float
 - The value of CRF to be used when encoding this layer. This setting takes effect when `rate_control_mode` is set to `CRF`. The range of CRF value is between `0` and `51`, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Defaults to `28`.
 - frame_rate str
 - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of `M/N` where `M` and `N` are integers (For example, `30000/1001`), or in the form of a number (For example, `30`, or `29.97`). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video.
 - height str
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label str
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - level str
 - The H.264 levels. Currently, the resource support Level up to `6.2`. The value can be `auto`, or a number that matches the H.264 profile. If not specified, the default is `auto`, which lets the encoder choose the Level that is appropriate for this layer.
 - max_bitrate int
 - The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
 - profile str
 - The H.264 profile. Possible values are `Auto`, `Main` and `Main10`. Defaults to `Auto`.
 - reference_frames int
 - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
 - slices int
 - The number of slices to be used when encoding this layer. If not specified, default is 
1, which means that encoder will use a single slice for each frame. - width str
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- bitrate Number
 - The average bitrate in bits per second at which to encode the input video when generating this layer.
 - adaptiveBFrameEnabled Boolean
 - Whether adaptive B-frames are used when encoding this layer. If not specified, the encoder will turn it on whenever the video profile permits its use. Defaults to `true`.
 - bFrames Number
 - The number of B-frames to use when encoding this layer. If not specified, the encoder chooses an appropriate number based on the video profile and level.
 - buffer
Window String - Specifies the maximum amount of time that the encoder should buffer frames before encoding. The value should be in ISO 8601 format. The value should be in the range 
0.1to100seconds. Defaults toPT5S. - crf Number
 - The value of CRF to be used when encoding this layer. This setting takes effect when 
rate_control_modeis setCRF. The range of CRF value is between0and51, where lower values would result in better quality, at the expense of higher file sizes. Higher values mean more compression, but at some point quality degradation will be noticed. Default to28. - frame
Rate String - The frame rate (in frames per second) at which to encode this layer. The value can be in the form of 
M/NwhereMandNare integers (For example,30000/1001), or in the form of a number (For example,30, or29.97). The encoder enforces constraints on allowed frame rates based on the profile and level. If it is not specified, the encoder will use the same frame rate as the input video. - height String
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label String
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - level String
 - The H.264 levels. Currently, the resource support Level up to 
6.2. The value can beauto, or a number that matches the H.264 profile. If not specified, the default isauto, which lets the encoder choose the Level that is appropriate for this layer. - max
Bitrate Number - The maximum bitrate (in bits per second), at which the VBV buffer should be assumed to refill. If not specified, defaults to the same value as bitrate.
 - profile String
 - The H.264 profile. Possible values are 
Auto,MainandMain10. Default toAuto. - reference
Frames Number - The number of reference frames to be used when encoding this layer. If not specified, the encoder determines an appropriate number based on the encoder complexity setting.
 - slices Number
 - The number of slices to be used when encoding this layer. If not specified, default is 
1, which means that encoder will use a single slice for each frame. - width String
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
TransformOutputCustomPresetCodecJpgImage, TransformOutputCustomPresetCodecJpgImageArgs              
- Start string
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Also supports a macro `{Best}`, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for `step` and `range`.
 - KeyFrameInterval string
 - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
 - Label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - Layers
List<Transform
Output Custom Preset Codec Jpg Image Layer>  - One or more 
layerblocks as defined below. - Range string
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at 5 minutes and 30 seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - Sprite
Column int - Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 
65535x65535. - Step string
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - Stretch
Mode string - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
AutoFit,AutoSizeorNone. Default toAutoSize. - Sync
Mode string - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
- Start string
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Also supports a macro `{Best}`, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for `step` and `range`.
 - KeyFrameInterval string
 - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
 - Label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - Layers
[]Transform
Output Custom Preset Codec Jpg Image Layer  - One or more 
layerblocks as defined below. - Range string
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at 5 minutes and 30 seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - Sprite
Column int - Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 
65535x65535. - Step string
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - Stretch
Mode string - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
AutoFit,AutoSizeorNone. Default toAutoSize. - Sync
Mode string - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
- start String
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Also supports a macro `{Best}`, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for `step` and `range`.
 - keyFrameInterval String
 - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
 - label String
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - layers
List<Transform
Output Custom Preset Codec Jpg Image Layer>  - One or more 
layerblocks as defined below. - range String
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at 5 minutes and 30 seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - sprite
Column Integer - Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 
65535x65535. - step String
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - stretch
Mode String - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
AutoFit,AutoSizeorNone. Default toAutoSize. - sync
Mode String - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
- start string
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Also supports a macro `{Best}`, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for `step` and `range`.
 - keyFrameInterval string
 - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
 - label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - layers
Transform
Output Custom Preset Codec Jpg Image Layer[]  - One or more 
layerblocks as defined below. - range string
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at 5 minutes and 30 seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - sprite
Column number - Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 
65535x65535. - step string
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - stretch
Mode string - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
AutoFit,AutoSizeorNone. Default toAutoSize. - sync
Mode string - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
- start str
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Also supports a macro `{Best}`, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for `step` and `range`.
 - key_frame_interval str
 - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
 - label str
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - layers
Sequence[Transform
Output Custom Preset Codec Jpg Image Layer]  - One or more 
layerblocks as defined below. - range str
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at 5 minutes and 30 seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - sprite_
column int - Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 
65535x65535. - step str
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - stretch_
mode str - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
AutoFit,AutoSizeorNone. Default toAutoSize. - sync_
mode str - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
- start String
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Also supports a macro `{Best}`, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for `step` and `range`.
 - keyFrameInterval String
 - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
 - label String
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - layers List<Property Map>
 - One or more 
layerblocks as defined below. - range String
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at 5 minutes and 30 seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - sprite
Column Number - Sets the number of columns used in thumbnail sprite image. The number of rows are automatically calculated and a VTT file is generated with the coordinate mappings for each thumbnail in the sprite. Note: this value should be a positive integer and a proper value is recommended so that the output image resolution will not go beyond JPEG maximum pixel resolution limit 
65535x65535. - step String
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - stretch
Mode String - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
AutoFit,AutoSizeorNone. Default toAutoSize. - sync
Mode String - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
TransformOutputCustomPresetCodecJpgImageLayer, TransformOutputCustomPresetCodecJpgImageLayerArgs                
- Height string
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - Label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - Quality int
 - The compression quality of the JPEG output. Range is from `0` to `100` and the default is `70`.
 - Width string
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- Height string
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - Label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - Quality int
 - The compression quality of the JPEG output. Range is from 
0to100and the default is70. - Width string
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- height String
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label String
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - quality Integer
 - The compression quality of the JPEG output. Range is from 
0to100and the default is70. - width String
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- height string
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - quality number
 - The compression quality of the JPEG output. Range is from 
0to100and the default is70. - width string
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- height str
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label str
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - quality int
 - The compression quality of the JPEG output. Range is from 
0to100and the default is70. - width str
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- height String
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label String
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - quality Number
 - The compression quality of the JPEG output. Range is from `0` to `100` and the default is `70`.
 - width String
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
TransformOutputCustomPresetCodecPngImage, TransformOutputCustomPresetCodecPngImageArgs              
- Start string
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Also supports a macro `{Best}`, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for `step` and `range`.
 - KeyFrameInterval string
 - The distance between two key frames. The value should be non-zero in the range `0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`.
 - Label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 - Layers
List<Transform
Output Custom Preset Codec Png Image Layer>  - One or more 
layerblocks as defined below. - Range string
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at5minutes and30seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - Step string
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - Stretch
Mode string - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
`AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`. - Sync
Mode string - Specifies the synchronization mode for the video. Possible values are 
`Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`. 
- Start string
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Also supports a macro `{Best}`, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are for `step` and `range`. - Key
Frame stringInterval  - The distance between two key frames. The value should be non-zero in the range 
`0.5` to `20` seconds, specified in ISO 8601 format. Note that this setting is ignored if `sync_mode` is set to `Passthrough`, where the KeyFrameInterval value will follow the input source setting. Defaults to `PT2S`. - Label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 - Layers
[]Transform
Output Custom Preset Codec Png Image Layer  - One or more 
layerblocks as defined below. - Range string
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at5minutes and30seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - Step string
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - Stretch
Mode string - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
`AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`. - Sync
Mode string - Specifies the synchronization mode for the video. Possible values are 
`Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`. 
- start String
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Also supports a macro{Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are forstepandrange. - key
Frame StringInterval  - The distance between two key frames. The value should be non-zero in the range 
0.5to20seconds, specified in ISO 8601 format. Note that this setting is ignored ifsync_modeis set toPassthrough, where the KeyFrameInterval value will follow the input source setting. Defaults toPT2S. - label String
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 - layers
List<Transform
Output Custom Preset Codec Png Image Layer>  - One or more 
layerblocks as defined below. - range String
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at5minutes and30seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - step String
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - stretch
Mode String - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
`AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`. - sync
Mode String - Specifies the synchronization mode for the video. Possible values are 
`Auto`, `Cfr`, `Passthrough` or `Vfr`. Defaults to `Auto`. 
- start string
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Also supports a macro{Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are forstepandrange. - key
Frame stringInterval  - The distance between two key frames. The value should be non-zero in the range 
0.5to20seconds, specified in ISO 8601 format. Note that this setting is ignored ifsync_modeis set toPassthrough, where the KeyFrameInterval value will follow the input source setting. Defaults toPT2S. - label string
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 - layers
Transform
Output Custom Preset Codec Png Image Layer[]  - One or more 
layerblocks as defined below. - range string
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at5minutes and30seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - step string
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - stretch
Mode string - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
`AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`. - sync
Mode string - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
- start str
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Also supports a macro{Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are forstepandrange. - key_
frame_ strinterval  - The distance between two key frames. The value should be non-zero in the range 
0.5to20seconds, specified in ISO 8601 format. Note that this setting is ignored ifsync_modeis set toPassthrough, where the KeyFrameInterval value will follow the input source setting. Defaults toPT2S. - label str
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 - layers
Sequence[Transform
Output Custom Preset Codec Png Image Layer]  - One or more 
layerblocks as defined below. - range str
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at5minutes and30seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - step str
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - stretch_
mode str - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
`AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`. - sync_
mode str - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
- start String
 - The position in the input video from where to start generating thumbnails. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Also supports a macro{Best}, which tells the encoder to select the best thumbnail from the first few seconds of the video and will only produce one thumbnail, no matter what other settings are forstepandrange. - key
Frame StringInterval  - The distance between two key frames. The value should be non-zero in the range 
0.5to20seconds, specified in ISO 8601 format. Note that this setting is ignored ifsync_modeis set toPassthrough, where the KeyFrameInterval value will follow the input source setting. Defaults toPT2S. - label String
 - Specifies the label for the codec. The label can be used to control muxing behavior.
 - layers List<Property Map>
 - One or more 
layerblocks as defined below. - range String
 - The position relative to transform preset start time in the input video at which to stop generating thumbnails. The value can be in ISO 8601 format (For example, 
PT5M30Sto stop at5minutes and30seconds from start time), or a frame count (For example,300to stop at the 300th frame from the frame at start time. If this value is1, it means only producing one thumbnail at start time), or a relative value to the stream duration (For example,50%to stop at half of stream duration from start time). The default value is100%, which means to stop at the end of the stream. - step String
 - The intervals at which thumbnails are generated. The value can be in ISO 8601 format (For example, 
PT05Sfor one image every 5 seconds), or a frame count (For example,30for one image every 30 frames), or a relative value to stream duration (For example,10%for one image every 10% of stream duration). Note: Step value will affect the first generated thumbnail, which may not be exactly the one specified at transform preset start time. This is due to the encoder, which tries to select the best thumbnail between start time and Step position from start time as the first output. As the default value is10%, it means if stream has long duration, the first generated thumbnail might be far away from the one specified at start time. Try to select reasonable value for Step if the first thumbnail is expected close to start time, or set Range value at1if only one thumbnail is needed at start time. - stretch
Mode String - The resizing mode, which indicates how the input video will be resized to fit the desired output resolution(s). Possible values are 
`AutoFit`, `AutoSize` or `None`. Defaults to `AutoSize`. - sync
Mode String - Specifies the synchronization mode for the video. Possible values are 
Auto,Cfr,PassthroughorVfr. Default toAuto. 
TransformOutputCustomPresetCodecPngImageLayer, TransformOutputCustomPresetCodecPngImageLayerArgs                
- Height string
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
`50%` means the output video has half as many pixels in height as the input. - Label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - Width string
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
`50%` means the output video has half as many pixels in width as the input. 
- Height string
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - Label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - Width string
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- height String
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label String
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - width String
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- height string
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label string
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - width string
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- height str
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label str
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - width str
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
- height String
 - The height of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in height as the input. - label String
 - The alphanumeric label for this layer, which can be used in multiplexing different video and audio layers, or in naming the output file.
 - width String
 - The width of the output video for this layer. The value can be absolute (in pixels) or relative (in percentage). For example 
50%means the output video has half as many pixels in width as the input. 
TransformOutputCustomPresetFilter, TransformOutputCustomPresetFilterArgs          
- Crop
Rectangle TransformOutput Custom Preset Filter Crop Rectangle  - A 
`crop_rectangle` block as defined above. - Deinterlace
Transform
Output Custom Preset Filter Deinterlace  - A 
deinterlaceblock as defined below. - Fade
In TransformOutput Custom Preset Filter Fade In  - A 
fade_inblock as defined above. - Fade
Out TransformOutput Custom Preset Filter Fade Out  - A 
fade_outblock as defined above. - Overlays
List<Transform
Output Custom Preset Filter Overlay>  - One or more 
overlayblocks as defined below. - Rotation string
 - The rotation to be applied to the input video before it is encoded. Possible values are 
`Auto`, `None`, `Rotate90`, `Rotate180`, `Rotate270`, or `Rotate0`. Defaults to `Auto`. 
- Crop
Rectangle TransformOutput Custom Preset Filter Crop Rectangle  - A 
crop_rectangleblock as defined above. - Deinterlace
Transform
Output Custom Preset Filter Deinterlace  - A 
deinterlaceblock as defined below. - Fade
In TransformOutput Custom Preset Filter Fade In  - A 
fade_inblock as defined above. - Fade
Out TransformOutput Custom Preset Filter Fade Out  - A 
fade_outblock as defined above. - Overlays
[]Transform
Output Custom Preset Filter Overlay  - One or more 
overlayblocks as defined below. - Rotation string
 - The rotation to be applied to the input video before it is encoded. Possible values are 
`Auto`, `None`, `Rotate90`, `Rotate180`, `Rotate270`, or `Rotate0`. Defaults to `Auto`. 
- crop
Rectangle TransformOutput Custom Preset Filter Crop Rectangle  - A 
crop_rectangleblock as defined above. - deinterlace
Transform
Output Custom Preset Filter Deinterlace  - A 
deinterlaceblock as defined below. - fade
In TransformOutput Custom Preset Filter Fade In  - A 
fade_inblock as defined above. - fade
Out TransformOutput Custom Preset Filter Fade Out  - A 
fade_outblock as defined above. - overlays
List<Transform
Output Custom Preset Filter Overlay>  - One or more 
overlayblocks as defined below. - rotation String
 - The rotation to be applied to the input video before it is encoded. Possible values are 
`Auto`, `None`, `Rotate90`, `Rotate180`, `Rotate270`, or `Rotate0`. Defaults to `Auto`. 
- crop
Rectangle TransformOutput Custom Preset Filter Crop Rectangle  - A 
crop_rectangleblock as defined above. - deinterlace
Transform
Output Custom Preset Filter Deinterlace  - A 
deinterlaceblock as defined below. - fade
In TransformOutput Custom Preset Filter Fade In  - A 
fade_inblock as defined above. - fade
Out TransformOutput Custom Preset Filter Fade Out  - A 
fade_outblock as defined above. - overlays
Transform
Output Custom Preset Filter Overlay[]  - One or more 
overlayblocks as defined below. - rotation string
 - The rotation to be applied to the input video before it is encoded. Possible values are 
`Auto`, `None`, `Rotate90`, `Rotate180`, `Rotate270`, or `Rotate0`. Defaults to `Auto`. 
- crop_
rectangle TransformOutput Custom Preset Filter Crop Rectangle  - A 
crop_rectangleblock as defined above. - deinterlace
Transform
Output Custom Preset Filter Deinterlace  - A 
deinterlaceblock as defined below. - fade_
in TransformOutput Custom Preset Filter Fade In  - A 
fade_inblock as defined above. - fade_
out TransformOutput Custom Preset Filter Fade Out  - A 
fade_outblock as defined above. - overlays
Sequence[Transform
Output Custom Preset Filter Overlay]  - One or more 
overlayblocks as defined below. - rotation str
 - The rotation to be applied to the input video before it is encoded. Possible values are 
`Auto`, `None`, `Rotate90`, `Rotate180`, `Rotate270`, or `Rotate0`. Defaults to `Auto`. 
- crop
Rectangle Property Map - A 
crop_rectangleblock as defined above. - deinterlace Property Map
 - A 
deinterlaceblock as defined below. - fade
In Property Map - A 
fade_inblock as defined above. - fade
Out Property Map - A 
fade_outblock as defined above. - overlays List<Property Map>
 - One or more 
overlayblocks as defined below. - rotation String
 - The rotation to be applied to the input video before it is encoded. Possible values are 
`Auto`, `None`, `Rotate90`, `Rotate180`, `Rotate270`, or `Rotate0`. Defaults to `Auto`. 
TransformOutputCustomPresetFilterCropRectangle, TransformOutputCustomPresetFilterCropRectangleArgs              
- Height string
 - The height of the rectangular region in pixels. This can be absolute pixel value (e.g 
`100`), or relative to the size of the video (For example, `50%`). - Left string
 - The number of pixels from the left-margin. This can be absolute pixel value (e.g 
`100`), or relative to the size of the video (For example, `50%`). - Top string
 - The number of pixels from the top-margin. This can be absolute pixel value (e.g 
`100`), or relative to the size of the video (For example, `50%`). - Width string
 - The width of the rectangular region in pixels. This can be absolute pixel value (e.g
`100`), or relative to the size of the video (For example, `50%`). 
- Height string
 - The height of the rectangular region in pixels. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - Left string
 - The number of pixels from the left-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - Top string
 - The number of pixels from the top-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - Width string
 - The width of the rectangular region in pixels. This can be absolute pixel value (e.g
100), or relative to the size of the video (For example,50%). 
- height String
 - The height of the rectangular region in pixels. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - left String
 - The number of pixels from the left-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - top String
 - The number of pixels from the top-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - width String
 - The width of the rectangular region in pixels. This can be absolute pixel value (e.g
100), or relative to the size of the video (For example,50%). 
- height string
 - The height of the rectangular region in pixels. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - left string
 - The number of pixels from the left-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - top string
 - The number of pixels from the top-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - width string
 - The width of the rectangular region in pixels. This can be absolute pixel value (e.g
100), or relative to the size of the video (For example,50%). 
- height str
 - The height of the rectangular region in pixels. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - left str
 - The number of pixels from the left-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - top str
 - The number of pixels from the top-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - width str
 - The width of the rectangular region in pixels. This can be absolute pixel value (e.g
100), or relative to the size of the video (For example,50%). 
- height String
 - The height of the rectangular region in pixels. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - left String
 - The number of pixels from the left-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - top String
 - The number of pixels from the top-margin. This can be absolute pixel value (e.g 
100), or relative to the size of the video (For example,50%). - width String
 - The width of the rectangular region in pixels. This can be absolute pixel value (e.g
100), or relative to the size of the video (For example,50%). 
TransformOutputCustomPresetFilterDeinterlace, TransformOutputCustomPresetFilterDeinterlaceArgs            
TransformOutputCustomPresetFilterFadeIn, TransformOutputCustomPresetFilterFadeInArgs              
- Duration string
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
 - Fade
Color string - The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: 
`rgb(255,0,0)`, `0xFF0000` or `#FF0000`. - Start string
 - The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, 
`PT05S` to start at 5 seconds), or a frame count (For example, `10` to start at the 10th frame), or a relative value to stream duration (For example, `10%` to start at 10% of stream duration). Defaults to `0`. 
- Duration string
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
 - Fade
Color string - The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: 
rgb(255,0,0),0xFF0000or#FF0000. - Start string
 - The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Default to0. 
- duration String
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
 - fade
Color String - The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: 
rgb(255,0,0),0xFF0000or#FF0000. - start String
 - The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Default to0. 
- duration string
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
 - fade
Color string - The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: 
rgb(255,0,0),0xFF0000or#FF0000. - start string
 - The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Default to0. 
- duration str
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
 - fade_
color str - The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: 
rgb(255,0,0),0xFF0000or#FF0000. - start str
 - The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Default to0. 
- duration String
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
 - fade
Color String - The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: 
rgb(255,0,0),0xFF0000or#FF0000. - start String
 - The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Default to0. 
TransformOutputCustomPresetFilterFadeOut, TransformOutputCustomPresetFilterFadeOutArgs              
- Duration string
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
 - Fade
Color string - The color for the fade in/out. It can be on the CSS Level1 colors or an RGB/hex value: e.g: 
`rgb(255,0,0)`, `0xFF0000` or `#FF0000`. - Start string
 - The position in the input video from where to start fade. The value can be in ISO 8601 format (For example, 
PT05Sto start at 5 seconds), or a frame count (For example,10to start at the 10th frame), or a relative value to stream duration (For example,10%to start at 10% of stream duration). Default to0. 
- Duration string
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
- FadeColor string - The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
- Start string - The position in the input video from where to start the fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
- duration String
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
- fadeColor String - The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
- start String - The position in the input video from where to start the fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
- duration string
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
- fadeColor string - The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
- start string - The position in the input video from where to start the fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
- duration str
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
- fade_color str - The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
- start str - The position in the input video from where to start the fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
- duration String
 - The duration of the fade effect in the video. The value can be in ISO 8601 format (For example, PT05S to fade In/Out a color during 5 seconds), or a frame count (For example, 10 to fade 10 frames from the start time), or a relative value to stream duration (For example, 10% to fade 10% of stream duration).
- fadeColor String - The color for the fade in/out. It can be one of the CSS Level 1 colors or an RGB/hex value, e.g. rgb(255,0,0), 0xFF0000 or #FF0000.
- start String - The position in the input video from where to start the fade. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or a frame count (For example, 10 to start at the 10th frame), or a relative value to stream duration (For example, 10% to start at 10% of stream duration). Defaults to 0.
TransformOutputCustomPresetFilterOverlay, TransformOutputCustomPresetFilterOverlayArgs            
- Audio TransformOutputCustomPresetFilterOverlayAudio - An audio block as defined above.
- Video TransformOutputCustomPresetFilterOverlayVideo - A video block as defined below. NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type you must create one overlay for each one.
- Audio TransformOutputCustomPresetFilterOverlayAudio - An audio block as defined above.
- Video TransformOutputCustomPresetFilterOverlayVideo - A video block as defined below. NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type you must create one overlay for each one.
- audio TransformOutputCustomPresetFilterOverlayAudio - An audio block as defined above.
- video TransformOutputCustomPresetFilterOverlayVideo - A video block as defined below. NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type you must create one overlay for each one.
- audio TransformOutputCustomPresetFilterOverlayAudio - An audio block as defined above.
- video TransformOutputCustomPresetFilterOverlayVideo - A video block as defined below. NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type you must create one overlay for each one.
- audio TransformOutputCustomPresetFilterOverlayAudio - An audio block as defined above.
- video TransformOutputCustomPresetFilterOverlayVideo - A video block as defined below. NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type you must create one overlay for each one.
- audio Property Map - An audio block as defined above.
- video Property Map - A video block as defined below. NOTE: Each overlay can only have one type: audio or video. If you need to apply a different type you must create one overlay for each one.
TransformOutputCustomPresetFilterOverlayAudio, TransformOutputCustomPresetFilterOverlayAudioArgs              
- InputLabel string - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- AudioGainLevel double - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- End string - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- FadeInDuration string - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- FadeOutDuration string - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- Start string - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- InputLabel string - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- AudioGainLevel float64 - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- End string - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- FadeInDuration string - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- FadeOutDuration string - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- Start string - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- inputLabel String - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- audioGainLevel Double - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- end String - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- fadeInDuration String - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- fadeOutDuration String - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- start String - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- inputLabel string - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- audioGainLevel number - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- end string - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- fadeInDuration string - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- fadeOutDuration string - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- start string - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- input_label str - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- audio_gain_level float - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- end str - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- fade_in_duration str - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- fade_out_duration str - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- start str - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- inputLabel String - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- audioGainLevel Number - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- end String - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- fadeInDuration String - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- fadeOutDuration String - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- start String - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
TransformOutputCustomPresetFilterOverlayVideo, TransformOutputCustomPresetFilterOverlayVideoArgs              
- InputLabel string - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- AudioGainLevel double - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- CropRectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle - A crop_rectangle block as defined above.
- End string - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- FadeInDuration string - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- FadeOutDuration string - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- Opacity double - The opacity of the overlay. The value should be in the range 0 to 1.0. Defaults to 1.0, which means the overlay is opaque.
- Position TransformOutputCustomPresetFilterOverlayVideoPosition - A position block as defined above.
- Start string - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- InputLabel string - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- AudioGainLevel float64 - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- CropRectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle - A crop_rectangle block as defined above.
- End string - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- FadeInDuration string - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- FadeOutDuration string - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- Opacity float64 - The opacity of the overlay. The value should be in the range 0 to 1.0. Defaults to 1.0, which means the overlay is opaque.
- Position TransformOutputCustomPresetFilterOverlayVideoPosition - A position block as defined above.
- Start string - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- inputLabel String - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- audioGainLevel Double - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- cropRectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle - A crop_rectangle block as defined above.
- end String - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- fadeInDuration String - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- fadeOutDuration String - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- opacity Double - The opacity of the overlay. The value should be in the range 0 to 1.0. Defaults to 1.0, which means the overlay is opaque.
- position TransformOutputCustomPresetFilterOverlayVideoPosition - A position block as defined above.
- start String - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- inputLabel string - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- audioGainLevel number - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- cropRectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle - A crop_rectangle block as defined above.
- end string - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- fadeInDuration string - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- fadeOutDuration string - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- opacity number - The opacity of the overlay. The value should be in the range 0 to 1.0. Defaults to 1.0, which means the overlay is opaque.
- position TransformOutputCustomPresetFilterOverlayVideoPosition - A position block as defined above.
- start string - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- input_label str - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- audio_gain_level float - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- crop_rectangle TransformOutputCustomPresetFilterOverlayVideoCropRectangle - A crop_rectangle block as defined above.
- end str - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- fade_in_duration str - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- fade_out_duration str - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- opacity float - The opacity of the overlay. The value should be in the range 0 to 1.0. Defaults to 1.0, which means the overlay is opaque.
- position TransformOutputCustomPresetFilterOverlayVideoPosition - A position block as defined above.
- start str - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
- inputLabel String - The label of the job input which is to be used as an overlay. The input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file.
- audioGainLevel Number - The gain level of audio in the overlay. The value should be in the range 0 to 1.0. The default is 1.0.
- cropRectangle Property Map - A crop_rectangle block as defined above.
- end String - The end position, with reference to the input video, at which the overlay ends. The value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into the input video. If not specified or the value is greater than the input video duration, the overlay will be applied until the end of the input video if the overlay media duration is greater than the input video duration, else the overlay will last as long as the overlay media duration.
- fadeInDuration String - The duration over which the overlay fades in onto the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade in (same as PT0S).
- fadeOutDuration String - The duration over which the overlay fades out of the input video. The value should be in ISO 8601 duration format. If not specified the default behavior is to have no fade out (same as PT0S).
- opacity Number - The opacity of the overlay. The value should be in the range 0 to 1.0. Defaults to 1.0, which means the overlay is opaque.
- position Property Map - A position block as defined above.
- start String - The start position, with reference to the input video, at which the overlay starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5 seconds into the input video. If not specified the overlay starts from the beginning of the input video.
TransformOutputCustomPresetFilterOverlayVideoCropRectangle, TransformOutputCustomPresetFilterOverlayVideoCropRectangleArgs                  
- Height string - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- Left string - The number of pixels from the left margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- Top string - The number of pixels from the top margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- Width string - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- Height string - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- Left string - The number of pixels from the left margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- Top string - The number of pixels from the top margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- Width string - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- height String - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- left String - The number of pixels from the left margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- top String - The number of pixels from the top margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- width String - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- height string - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- left string - The number of pixels from the left margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- top string - The number of pixels from the top margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- width string - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- height str - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- left str - The number of pixels from the left margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- top str - The number of pixels from the top margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- width str - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- height String - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- left String - The number of pixels from the left margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- top String - The number of pixels from the top margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
- width String - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (For example, 50%).
TransformOutputCustomPresetFilterOverlayVideoPosition, TransformOutputCustomPresetFilterOverlayVideoPositionArgs                
- Height string
 - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - Left string
 - The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - Top string
 - The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - Width string
 - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
- Height string
 - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - Left string
 - The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - Top string
 - The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - Width string
 - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
- height String
 - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - left String
 - The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - top String
 - The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - width String
 - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
- height string
 - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - left string
 - The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - top string
 - The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - width string
 - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
- height str
 - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - left str
 - The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - top str
 - The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - width str
 - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
- height String
 - The height of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - left String
 - The number of pixels from the left-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - top String
 - The number of pixels from the top-margin. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%). - width String
 - The width of the rectangular region in pixels. This can be an absolute pixel value (e.g. 100), or relative to the size of the video (for example, 50%).
TransformOutputCustomPresetFormat, TransformOutputCustomPresetFormatArgs          
- Jpg TransformOutputCustomPresetFormatJpg - A jpg block as defined below.
- Mp4 TransformOutputCustomPresetFormatMp4 - A mp4 block as defined below.
- Png TransformOutputCustomPresetFormatPng - A png block as defined below.
- TransportStream TransformOutputCustomPresetFormatTransportStream - A transport_stream block as defined below. NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.
- Jpg TransformOutputCustomPresetFormatJpg - A jpg block as defined below.
- Mp4 TransformOutputCustomPresetFormatMp4 - A mp4 block as defined below.
- Png TransformOutputCustomPresetFormatPng - A png block as defined below.
- TransportStream TransformOutputCustomPresetFormatTransportStream - A transport_stream block as defined below. NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.
- jpg TransformOutputCustomPresetFormatJpg - A jpg block as defined below.
- mp4 TransformOutputCustomPresetFormatMp4 - A mp4 block as defined below.
- png TransformOutputCustomPresetFormatPng - A png block as defined below.
- transportStream TransformOutputCustomPresetFormatTransportStream - A transport_stream block as defined below. NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.
- jpg TransformOutputCustomPresetFormatJpg - A jpg block as defined below.
- mp4 TransformOutputCustomPresetFormatMp4 - A mp4 block as defined below.
- png TransformOutputCustomPresetFormatPng - A png block as defined below.
- transportStream TransformOutputCustomPresetFormatTransportStream - A transport_stream block as defined below. NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.
- jpg TransformOutputCustomPresetFormatJpg - A jpg block as defined below.
- mp4 TransformOutputCustomPresetFormatMp4 - A mp4 block as defined below.
- png TransformOutputCustomPresetFormatPng - A png block as defined below.
- transport_stream TransformOutputCustomPresetFormatTransportStream - A transport_stream block as defined below. NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.
- jpg Property Map - A jpg block as defined below.
- mp4 Property Map - A mp4 block as defined below.
- png Property Map - A png block as defined below.
- transportStream Property Map - A transport_stream block as defined below. NOTE: Each format can only have one type: jpg, mp4, png or transport_stream. If you need to apply a different type, you must create one format for each one.
TransformOutputCustomPresetFormatJpg, TransformOutputCustomPresetFormatJpgArgs            
- FilenamePattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name (the file suffix is not included) of the input video file is less than 32 characters long, the base name of the input video file will be used. If the length of the base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - The string "Audio" plus the audio stream number (starting from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
- Filename
Pattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
- filename
Pattern String - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
- filename
Pattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
- filename_
pattern str - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
- filename
Pattern String - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
TransformOutputCustomPresetFormatMp4, TransformOutputCustomPresetFormatMp4Args            
- FilenamePattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name (the file suffix is not included) of the input video file is less than 32 characters long, the base name of the input video file will be used. If the length of the base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - The string "Audio" plus the audio stream number (starting from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
- OutputFiles List<TransformOutputCustomPresetFormatMp4OutputFile> - One or more output_file blocks as defined below.
- Filename
Pattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - Output
Files []TransformOutput Custom Preset Format Mp4Output File  - One or more 
output_fileblocks as defined below. 
- filename
Pattern String - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - output
Files List<TransformOutput Custom Preset Format Mp4Output File>  - One or more 
output_fileblocks as defined below. 
- filename
Pattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - output
Files TransformOutput Custom Preset Format Mp4Output File[]  - One or more 
output_fileblocks as defined below. 
- filename_
pattern str - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - output_
files Sequence[TransformOutput Custom Preset Format Mp4Output File]  - One or more 
output_fileblocks as defined below. 
- filename
Pattern String - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - output
Files List<Property Map> - One or more 
output_fileblocks as defined below. 
TransformOutputCustomPresetFormatMp4OutputFile, TransformOutputCustomPresetFormatMp4OutputFileArgs              
- Labels List<string>
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1.
- Labels []string
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1andv2, and one audio layer with labela1, then an array like["v1", "a1"]tells the encoder to produce an output file with the video track represented byv1and the audio track represented bya1. 
- labels List<String>
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1andv2, and one audio layer with labela1, then an array like["v1", "a1"]tells the encoder to produce an output file with the video track represented byv1and the audio track represented bya1. 
- labels string[]
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1andv2, and one audio layer with labela1, then an array like["v1", "a1"]tells the encoder to produce an output file with the video track represented byv1and the audio track represented bya1. 
- labels Sequence[str]
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1andv2, and one audio layer with labela1, then an array like["v1", "a1"]tells the encoder to produce an output file with the video track represented byv1and the audio track represented bya1. 
- labels List<String>
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1andv2, and one audio layer with labela1, then an array like["v1", "a1"]tells the encoder to produce an output file with the video track represented byv1and the audio track represented bya1. 
TransformOutputCustomPresetFormatPng, TransformOutputCustomPresetFormatPngArgs            
- FilenamePattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name (the file suffix is not included) of the input video file is less than 32 characters long, the base name of the input video file will be used. If the length of the base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - The string "Audio" plus the audio stream number (starting from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
- Filename
Pattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
- filename
Pattern String - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
- filename
Pattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
- filename_
pattern str - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
- filename
Pattern String - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. 
TransformOutputCustomPresetFormatTransportStream, TransformOutputCustomPresetFormatTransportStreamArgs              
- FilenamePattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: {Basename} - An expansion macro that will use the name of the input video file. If the base name (the file suffix is not included) of the input video file is less than 32 characters long, the base name of the input video file will be used. If the length of the base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - The string "Audio" plus the audio stream number (starting from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename.
- OutputFiles List<TransformOutputCustomPresetFormatTransportStreamOutputFile> - One or more output_file blocks as defined above.
- Filename
Pattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - Output
Files []TransformOutput Custom Preset Format Transport Stream Output File  - One or more 
output_fileblocks as defined above. 
- filename
Pattern String - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - output
Files List<TransformOutput Custom Preset Format Transport Stream Output File>  - One or more 
output_fileblocks as defined above. 
- filename
Pattern string - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename}- An expansion macro that will use the name of the input video file. If the base name(the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length.{Extension}- The appropriate extension for this format.{Label}- The label assigned to the codec/layer.{Index}- A unique index for thumbnails. Only applicable to thumbnails.{AudioStream}- string "Audio" plus audio stream number(start from 1).{Bitrate}- The audio/video bitrate in kbps. Not applicable to thumbnails.{Codec}- The type of the audio/video codec.{Resolution}- The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - output
Files TransformOutput Custom Preset Format Transport Stream Output File[]  - One or more 
output_file blocks as defined above. 
- filename_
pattern str - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename} - An expansion macro that will use the name of the input video file. If the base name (the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number (start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - output_
files Sequence[TransformOutput Custom Preset Format Transport Stream Output File]  - One or more 
output_file blocks as defined above. 
- filename
Pattern String - The file naming pattern used for the creation of output files. The following macros are supported in the file name: 
{Basename} - An expansion macro that will use the name of the input video file. If the base name (the file suffix is not included) of the input video file is less than 32 characters long, the base name of input video files will be used. If the length of base name of the input video file exceeds 32 characters, the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {AudioStream} - string "Audio" plus audio stream number (start from 1). {Bitrate} - The audio/video bitrate in kbps. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed from the filename. - output
Files List<Property Map> - One or more 
output_file blocks as defined above. 
TransformOutputCustomPresetFormatTransportStreamOutputFile, TransformOutputCustomPresetFormatTransportStreamOutputFileArgs                  
- Labels List<string>
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1. 
- Labels []string
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1. 
- labels List<String>
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1. 
- labels string[]
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1. 
- labels Sequence[str]
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1. 
- labels List<String>
 - The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels 
v1 and v2, and one audio layer with label a1, then an array like ["v1", "a1"] tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1. 
TransformOutputFaceDetectorPreset, TransformOutputFaceDetectorPresetArgs          
- Analysis
Resolution string - Possible values are 
SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. - Blur
Type string - Specifies the type of blur to apply to faces in the output video. Possible values are 
Black, Box, High, Low, and Med. - Experimental
Options Dictionary<string, string> - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - Face
Redactor stringMode  - This mode provides the ability to choose between the following settings: 1) 
Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. 
- Analysis
Resolution string - Possible values are 
SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. - Blur
Type string - Specifies the type of blur to apply to faces in the output video. Possible values are 
Black, Box, High, Low, and Med. - Experimental
Options map[string]string - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - Face
Redactor stringMode  - This mode provides the ability to choose between the following settings: 1) 
Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. 
- analysis
Resolution String - Possible values are 
SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. - blur
Type String - Specifies the type of blur to apply to faces in the output video. Possible values are 
Black, Box, High, Low, and Med. - experimental
Options Map<String,String> - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - face
Redactor StringMode  - This mode provides the ability to choose between the following settings: 1) 
Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. 
- analysis
Resolution string - Possible values are 
SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. - blur
Type string - Specifies the type of blur to apply to faces in the output video. Possible values are 
Black, Box, High, Low, and Med. - experimental
Options {[key: string]: string} - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - face
Redactor stringMode  - This mode provides the ability to choose between the following settings: 1) 
Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. 
- analysis_
resolution str - Possible values are 
SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. - blur_
type str - Specifies the type of blur to apply to faces in the output video. Possible values are 
Black, Box, High, Low, and Med. - experimental_
options Mapping[str, str] - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - face_
redactor_ strmode  - This mode provides the ability to choose between the following settings: 1) 
Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. 
- analysis
Resolution String - Possible values are 
SourceResolution or StandardDefinition. Specifies the maximum resolution at which your video is analyzed. Using SourceResolution will keep the input video at its original resolution when analyzed. Using StandardDefinition will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to StandardDefinition will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected. Default to SourceResolution. - blur
Type String - Specifies the type of blur to apply to faces in the output video. Possible values are 
Black, Box, High, Low, and Med. - experimental
Options Map<String> - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - face
Redactor StringMode  - This mode provides the ability to choose between the following settings: 1) 
Analyze - For detection only. This mode generates a metadata JSON file marking appearances of faces throughout the video. Where possible, appearances of the same person are assigned the same ID. 2) Combined - Additionally redacts (blurs) detected faces. 3) Redact - This enables a 2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the metadata file from a prior analyze pass, along with the source video, and a user-selected subset of IDs that require redaction. Default to Analyze. 
TransformOutputVideoAnalyzerPreset, TransformOutputVideoAnalyzerPresetArgs          
- Audio
Analysis stringMode  - Possible values are 
Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. - Audio
Language string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - Experimental
Options Dictionary<string, string> - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - Insights
Type string - Defines the type of insights that you want the service to generate. The allowed values are 
AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights. 
- Audio
Analysis stringMode  - Possible values are 
Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. - Audio
Language string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - Experimental
Options map[string]string - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - Insights
Type string - Defines the type of insights that you want the service to generate. The allowed values are 
AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights. 
- audio
Analysis StringMode  - Possible values are 
Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. - audio
Language String - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - experimental
Options Map<String,String> - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - insights
Type String - Defines the type of insights that you want the service to generate. The allowed values are 
AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights. 
- audio
Analysis stringMode  - Possible values are 
Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. - audio
Language string - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - experimental
Options {[key: string]: string} - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - insights
Type string - Defines the type of insights that you want the service to generate. The allowed values are 
AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights. 
- audio_
analysis_ strmode  - Possible values are 
Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. - audio_
language str - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - experimental_
options Mapping[str, str] - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - insights_
type str - Defines the type of insights that you want the service to generate. The allowed values are 
AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights. 
- audio
Analysis StringMode  - Possible values are 
Basic or Standard. Determines the set of audio analysis operations to be performed. Default to Standard. - audio
Language String - The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode:Basic, since automatic language detection is not included in basic mode. If the language isn't specified, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fall back to 
en-US. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. - experimental
Options Map<String> - Dictionary containing key value pairs for parameters not exposed in the preset itself.
 - insights
Type String - Defines the type of insights that you want the service to generate. The allowed values are 
AudioInsightsOnly, VideoInsightsOnly, and AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly, if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out. Default to AllInsights. 
Import
Transforms can be imported using the resource id, e.g.
$ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaServices/media1/transforms/transform1
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
 - Azure Classic pulumi/pulumi-azure
 - License
 - Apache-2.0
 - Notes
 - This Pulumi package is based on the 
azurerm Terraform Provider.