azurerm_media_transform - FaceDetectorPreset and `video_analyzer_preset` are no longer supported (#24002)

* fix acctest failure

* update code
sinbai authored Nov 28, 2023
1 parent 79b4740 commit ab73338
Showing 2 changed files with 169 additions and 171 deletions.
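The commit does not delete the retired preset blocks outright; it moves them behind the provider's 4.0 feature flag and marks them deprecated, so they only disappear once 4.0 behaviour is enabled. A minimal, self-contained sketch of that gating pattern follows; the `fourPointOh` stub, the `field` type and `buildOutputSchema` are simplified stand-ins for illustration, not the provider's real `features` and `pluginsdk` packages.

package main

import "fmt"

// fourPointOh stands in for features.FourPointOh(); assumed false here, i.e. 3.x behaviour.
func fourPointOh() bool { return false }

// field is a simplified stand-in for *pluginsdk.Schema.
type field struct {
	Deprecated string
}

// buildOutputSchema mirrors the shape of the change: the retired preset blocks
// are only registered while the 4.0 flag is off, and carry a deprecation message.
func buildOutputSchema() map[string]*field {
	s := map[string]*field{
		"audio_analyzer_preset": {},
		"builtin_preset":        {},
		"custom_preset":         {},
	}
	if !fourPointOh() {
		s["face_detector_preset"] = &field{Deprecated: "`face_detector_preset` will be removed in version 4.0 of the AzureRM Provider as it has been retired."}
		s["video_analyzer_preset"] = &field{Deprecated: "`video_analyzer_preset` will be removed in version 4.0 of the AzureRM Provider as it has been retired."}
	}
	return s
}

func main() {
	for name, f := range buildOutputSchema() {
		if f.Deprecated != "" {
			fmt.Printf("%s (deprecated: %s)\n", name, f.Deprecated)
		} else {
			fmt.Println(name)
		}
	}
}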
internal/services/media/media_transform_resource.go (293 changes: 166 additions & 127 deletions)
@@ -17,6 +17,7 @@ import (
"github.com/hashicorp/terraform-provider-azurerm/helpers/tf"
"github.com/hashicorp/terraform-provider-azurerm/helpers/validate"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/internal/features"
"github.com/hashicorp/terraform-provider-azurerm/internal/services/media/migration"
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk"
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation"
@@ -25,7 +26,7 @@ import (
)

func resourceMediaTransform() *pluginsdk.Resource {
return &pluginsdk.Resource{
resource := &pluginsdk.Resource{
Create: resourceMediaTransformCreateUpdate,
Read: resourceMediaTransformRead,
Update: resourceMediaTransformCreateUpdate,
@@ -185,75 +186,6 @@ func resourceMediaTransform() *pluginsdk.Resource {
},
},
},
// lintignore:XS003
"video_analyzer_preset": {
Type: pluginsdk.TypeList,
Optional: true,
MaxItems: 1,
Elem: &pluginsdk.Resource{
Schema: map[string]*pluginsdk.Schema{
// https://go.microsoft.com/fwlink/?linkid=2109463
"audio_language": {
Type: pluginsdk.TypeString,
Optional: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"audio_analysis_mode": {
Type: pluginsdk.TypeString,
Optional: true,
Default: string(encodings.AudioAnalysisModeStandard),
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForAudioAnalysisMode(), false),
},
"insights_type": {
Type: pluginsdk.TypeString,
Optional: true,
Default: string(encodings.InsightsTypeAllInsights),
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForInsightsType(), false),
},
"experimental_options": {
Type: pluginsdk.TypeMap,
Optional: true,
Elem: &pluginsdk.Schema{
Type: pluginsdk.TypeString,
},
},
},
},
},
// lintignore:XS003
"face_detector_preset": {
Type: pluginsdk.TypeList,
Optional: true,
MaxItems: 1,
Elem: &pluginsdk.Resource{
Schema: map[string]*pluginsdk.Schema{
"analysis_resolution": {
Type: pluginsdk.TypeString,
Optional: true,
Default: string(encodings.AnalysisResolutionSourceResolution),
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForAnalysisResolution(), false),
},
"blur_type": {
Type: pluginsdk.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForBlurType(), false),
},
"experimental_options": {
Type: pluginsdk.TypeMap,
Optional: true,
Elem: &pluginsdk.Schema{
Type: pluginsdk.TypeString,
},
},
"face_redactor_mode": {
Type: pluginsdk.TypeString,
Optional: true,
Default: string(encodings.FaceRedactorModeAnalyze),
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForFaceRedactorMode(), false),
},
},
},
},
"custom_preset": {
Type: pluginsdk.TypeList,
Optional: true,
@@ -1187,6 +1119,86 @@ func resourceMediaTransform() *pluginsdk.Resource {
},
},
}

if !features.FourPointOh() {
// NOTE: `face_detector_preset` should be removed as the Media Services Face Detector has been retired and would not have a direct replacement.
// Refer to https://learn.microsoft.com/en-us/azure/media-services/latest/analyze-face-retirement-migration-overview for details.
// TODO Remove in 4.0
resource.Schema["output"].Elem.(*pluginsdk.Resource).Schema["face_detector_preset"] = &pluginsdk.Schema{
Type: pluginsdk.TypeList,
Optional: true,
MaxItems: 1,
Elem: &pluginsdk.Resource{
Schema: map[string]*pluginsdk.Schema{
"analysis_resolution": {
Type: pluginsdk.TypeString,
Optional: true,
Default: string(encodings.AnalysisResolutionSourceResolution),
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForAnalysisResolution(), false),
},
"blur_type": {
Type: pluginsdk.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForBlurType(), false),
},
"experimental_options": {
Type: pluginsdk.TypeMap,
Optional: true,
Elem: &pluginsdk.Schema{
Type: pluginsdk.TypeString,
},
},
"face_redactor_mode": {
Type: pluginsdk.TypeString,
Optional: true,
Default: string(encodings.FaceRedactorModeAnalyze),
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForFaceRedactorMode(), false),
},
},
},
Deprecated: "`face_detector_preset` will be removed in version 4.0 of the AzureRM Provider as it has been retired.",
}
// NOTE: `video_analyzer_preset` should be removed as it has been retired.
// Refer to https://learn.microsoft.com/en-us/azure/media-services/latest/analyze-video-retirement-migration-overview for details.
// TODO Remove in 4.0
resource.Schema["output"].Elem.(*pluginsdk.Resource).Schema["video_analyzer_preset"] = &pluginsdk.Schema{
Type: pluginsdk.TypeList,
Optional: true,
MaxItems: 1,
Elem: &pluginsdk.Resource{
Schema: map[string]*pluginsdk.Schema{
// https://go.microsoft.com/fwlink/?linkid=2109463
"audio_language": {
Type: pluginsdk.TypeString,
Optional: true,
ValidateFunc: validation.StringIsNotEmpty,
},
"audio_analysis_mode": {
Type: pluginsdk.TypeString,
Optional: true,
Default: string(encodings.AudioAnalysisModeStandard),
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForAudioAnalysisMode(), false),
},
"insights_type": {
Type: pluginsdk.TypeString,
Optional: true,
Default: string(encodings.InsightsTypeAllInsights),
ValidateFunc: validation.StringInSlice(encodings.PossibleValuesForInsightsType(), false),
},
"experimental_options": {
Type: pluginsdk.TypeMap,
Optional: true,
Elem: &pluginsdk.Schema{
Type: pluginsdk.TypeString,
},
},
},
},
Deprecated: "`video_analyzer_preset` will be removed in version 4.0 of the AzureRM Provider as it has been retired.",
}
}

return resource
}

func resourceMediaTransformCreateUpdate(d *pluginsdk.ResourceData, meta interface{}) error {
@@ -1333,15 +1345,25 @@ func flattenTransformOutputs(input []encodings.TransformOutput) []interface{} {
}

preset := flattenPreset(transformOutput.Preset)
results = append(results, map[string]interface{}{
"audio_analyzer_preset": preset.audioAnalyzerPresets,
"builtin_preset": preset.builtInPresets,
"custom_preset": preset.customPresets,
"face_detector_preset": preset.faceDetectorPresets,
"on_error_action": onErrorAction,
"relative_priority": relativePriority,
"video_analyzer_preset": preset.videoAnalyzerPresets,
})
if !features.FourPointOh() {
results = append(results, map[string]interface{}{
"audio_analyzer_preset": preset.audioAnalyzerPresets,
"builtin_preset": preset.builtInPresets,
"custom_preset": preset.customPresets,
"face_detector_preset": preset.faceDetectorPresets,
"on_error_action": onErrorAction,
"relative_priority": relativePriority,
"video_analyzer_preset": preset.videoAnalyzerPresets,
})
} else {
results = append(results, map[string]interface{}{
"audio_analyzer_preset": preset.audioAnalyzerPresets,
"builtin_preset": preset.builtInPresets,
"custom_preset": preset.customPresets,
"on_error_action": onErrorAction,
"relative_priority": relativePriority,
})
}
}

return results
@@ -1351,8 +1373,12 @@ func expandPreset(transform map[string]interface{}) (encodings.Preset, error) {
audioAnalyzerPresets := transform["audio_analyzer_preset"].([]interface{})
builtInPresets := transform["builtin_preset"].([]interface{})
customPresets := transform["custom_preset"].([]interface{})
faceDetectorPresets := transform["face_detector_preset"].([]interface{})
videoAnalyzerPresets := transform["video_analyzer_preset"].([]interface{})
faceDetectorPresets := make([]interface{}, 0)
videoAnalyzerPresets := make([]interface{}, 0)
if !features.FourPointOh() {
faceDetectorPresets = transform["face_detector_preset"].([]interface{})
videoAnalyzerPresets = transform["video_analyzer_preset"].([]interface{})
}

presetsCount := 0
if len(audioAnalyzerPresets) > 0 {
@@ -1364,14 +1390,21 @@ func expandPreset(transform map[string]interface{}) (encodings.Preset, error) {
if len(customPresets) > 0 {
presetsCount++
}
if len(faceDetectorPresets) > 0 {
presetsCount++
}
if len(videoAnalyzerPresets) > 0 {
presetsCount++
if !features.FourPointOh() {
if len(faceDetectorPresets) > 0 {
presetsCount++
}
if len(videoAnalyzerPresets) > 0 {
presetsCount++
}
}

if presetsCount == 0 {
return nil, fmt.Errorf("output must contain at least one type of preset: builtin_preset, custom_preset, face_detector_preset, video_analyzer_preset or audio_analyzer_preset")
if !features.FourPointOh() {
return nil, fmt.Errorf("output must contain at least one type of preset: builtin_preset, custom_preset, face_detector_preset, video_analyzer_preset or audio_analyzer_preset")
} else {
return nil, fmt.Errorf("output must contain at least one type of preset: builtin_preset, custom_preset or audio_analyzer_preset")
}
}
if presetsCount > 1 {
return nil, fmt.Errorf("more than one type of preset in the same output is not allowed")
@@ -1439,59 +1472,65 @@ func expandPreset(transform map[string]interface{}) (encodings.Preset, error) {
return builtInPreset, nil
}

if len(faceDetectorPresets) > 0 {
preset := faceDetectorPresets[0].(map[string]interface{})
if !features.FourPointOh() {
if len(faceDetectorPresets) > 0 {
preset := faceDetectorPresets[0].(map[string]interface{})

options, err := expandExperimentalOptions(preset["experimental_options"].(map[string]interface{}))
if err != nil {
return nil, err
}
options, err := expandExperimentalOptions(preset["experimental_options"].(map[string]interface{}))
if err != nil {
return nil, err
}

faceDetectorPreset := &encodings.FaceDetectorPreset{
ExperimentalOptions: options,
}
faceDetectorPreset := &encodings.FaceDetectorPreset{
ExperimentalOptions: options,
}

if v := preset["analysis_resolution"].(string); v != "" {
faceDetectorPreset.Resolution = pointer.To(encodings.AnalysisResolution(v))
}
if v := preset["analysis_resolution"].(string); v != "" {
faceDetectorPreset.Resolution = pointer.To(encodings.AnalysisResolution(v))
}

if v := preset["blur_type"].(string); v != "" {
faceDetectorPreset.BlurType = pointer.To(encodings.BlurType(v))
}
if v := preset["blur_type"].(string); v != "" {
faceDetectorPreset.BlurType = pointer.To(encodings.BlurType(v))
}

if v := preset["face_redactor_mode"].(string); v != "" {
faceDetectorPreset.Mode = pointer.To(encodings.FaceRedactorMode(v))
}
if v := preset["face_redactor_mode"].(string); v != "" {
faceDetectorPreset.Mode = pointer.To(encodings.FaceRedactorMode(v))
}

return faceDetectorPreset, nil
}
return faceDetectorPreset, nil
}

if len(videoAnalyzerPresets) > 0 {
presets := transform["video_analyzer_preset"].([]interface{})
preset := presets[0].(map[string]interface{})
if len(videoAnalyzerPresets) > 0 {
presets := transform["video_analyzer_preset"].([]interface{})
preset := presets[0].(map[string]interface{})

options, err := expandExperimentalOptions(preset["experimental_options"].(map[string]interface{}))
if err != nil {
return nil, err
}
options, err := expandExperimentalOptions(preset["experimental_options"].(map[string]interface{}))
if err != nil {
return nil, err
}

videoAnalyzerPreset := &encodings.VideoAnalyzerPreset{
ExperimentalOptions: options,
}
videoAnalyzerPreset := &encodings.VideoAnalyzerPreset{
ExperimentalOptions: options,
}

if v := preset["audio_language"].(string); v != "" {
videoAnalyzerPreset.AudioLanguage = utils.String(v)
}
if v := preset["audio_analysis_mode"].(string); v != "" {
videoAnalyzerPreset.Mode = pointer.To(encodings.AudioAnalysisMode(v))
}
if v := preset["insights_type"].(string); v != "" {
videoAnalyzerPreset.InsightsToExtract = pointer.To(encodings.InsightsType(v))
if v := preset["audio_language"].(string); v != "" {
videoAnalyzerPreset.AudioLanguage = utils.String(v)
}
if v := preset["audio_analysis_mode"].(string); v != "" {
videoAnalyzerPreset.Mode = pointer.To(encodings.AudioAnalysisMode(v))
}
if v := preset["insights_type"].(string); v != "" {
videoAnalyzerPreset.InsightsToExtract = pointer.To(encodings.InsightsType(v))
}
return videoAnalyzerPreset, nil
}
return videoAnalyzerPreset, nil
}

return nil, fmt.Errorf("output must contain at least one type of preset: builtin_preset, custom_preset, face_detector_preset, video_analyzer_preset or audio_analyzer_preset")
if !features.FourPointOh() {
return nil, fmt.Errorf("output must contain at least one type of preset: builtin_preset, custom_preset, face_detector_preset, video_analyzer_preset or audio_analyzer_preset")
} else {
return nil, fmt.Errorf("output must contain at least one type of preset: builtin_preset, custom_preset or audio_analyzer_preset")
}
}

type flattenedPresets struct {
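The `expandPreset` changes above keep the existing rule that each output must configure exactly one preset block, but drop the retired block types from both the count and the error text once the 4.0 flag is on. A rough, self-contained sketch of that validation, using plain slices and a hypothetical `validatePresets` helper in place of the provider's schema handling:

package main

import (
	"errors"
	"fmt"
)

// fourPointOh stands in for features.FourPointOh(); assumed false here, i.e. 3.x behaviour.
func fourPointOh() bool { return false }

// validatePresets mirrors the counting logic: exactly one preset block may be set per output.
func validatePresets(audio, builtin, custom, faceDetector, videoAnalyzer []interface{}) error {
	count := 0
	for _, block := range [][]interface{}{audio, builtin, custom} {
		if len(block) > 0 {
			count++
		}
	}
	if !fourPointOh() {
		// The retired blocks still count as presets until 4.0.
		for _, block := range [][]interface{}{faceDetector, videoAnalyzer} {
			if len(block) > 0 {
				count++
			}
		}
	}
	switch {
	case count == 0 && !fourPointOh():
		return errors.New("output must contain at least one type of preset: builtin_preset, custom_preset, face_detector_preset, video_analyzer_preset or audio_analyzer_preset")
	case count == 0:
		return errors.New("output must contain at least one type of preset: builtin_preset, custom_preset or audio_analyzer_preset")
	case count > 1:
		return errors.New("more than one type of preset in the same output is not allowed")
	}
	return nil
}

func main() {
	fmt.Println(validatePresets(nil, []interface{}{map[string]interface{}{}}, nil, nil, nil)) // <nil>
	fmt.Println(validatePresets(nil, nil, nil, nil, nil))                                     // lists the allowed preset types
}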