Terraform API Gateway OPTIONS pre-flight not being communicated - axios

I have an AWS API Gateway REST API with Lambda Proxy integration created with Terraform. The Lambdas perform CRUD operations on a DynamoDB table. Cognito User Pool Authorizer is set for anything that isn't a GET or OPTIONS request.
I've configured OPTIONS preflight as a MOCK integration in a Terraform module with Access-Control-Allow-Headers, Access-Control-Allow-Methods, Access-Control-Allow-Origin for all resources (modified from this article on Medium):
# api/cors/main.tf
resource "aws_api_gateway_method" "cors_method" {
  rest_api_id   = var.api_id
  resource_id   = var.resource_id
  http_method   = "OPTIONS"
  authorization = "NONE"
}

resource "aws_api_gateway_method_response" "cors_method_response" {
  rest_api_id = var.api_id
  resource_id = var.resource_id
  http_method = aws_api_gateway_method.cors_method.http_method
  status_code = "200"
  response_models = {
    "application/json" = "Empty"
  }
  response_parameters = {
    "method.response.header.Access-Control-Allow-Headers" = true
    "method.response.header.Access-Control-Allow-Methods" = true
    "method.response.header.Access-Control-Allow-Origin"  = true
  }
  depends_on = [aws_api_gateway_method.cors_method]
}

resource "aws_api_gateway_integration" "cors_integration" {
  rest_api_id = var.api_id
  resource_id = var.resource_id
  http_method = aws_api_gateway_method.cors_method.http_method
  type        = "MOCK"
  depends_on  = [aws_api_gateway_method.cors_method]
}

resource "aws_api_gateway_integration_response" "cors_integration_response" {
  rest_api_id = var.api_id
  resource_id = var.resource_id
  http_method = aws_api_gateway_method.cors_method.http_method
  status_code = aws_api_gateway_method_response.cors_method_response.status_code
  response_parameters = {
    "method.response.header.Access-Control-Allow-Headers" = "'${join(",", var.headers)}'"
    "method.response.header.Access-Control-Allow-Methods" = "'${join(",", var.methods)}'"
    "method.response.header.Access-Control-Allow-Origin"  = "'${join(",", var.origins)}'"
  }
  depends_on = [aws_api_gateway_method_response.cors_method_response]
}

# api/cors/variables.tf
variable "api_id" {}
variable "resource_id" {}

variable "origins" {
  type    = list(string)
  default = ["http://localhost:3000"]
}

variable "methods" {
  type = list(string)
}

variable "headers" {
  type    = list(string)
  default = ["Content-Type", "X-Amz-Date", "Authorization", "X-Api-Key", "X-Amz-Security-Token"]
}
# api/main.tf
# API, other API resources, deployment, stage also defined here

# /users/{username}/follow
resource "aws_api_gateway_resource" "follow" {
  rest_api_id = aws_api_gateway_rest_api.api.id
  parent_id   = aws_api_gateway_resource.username.id
  path_part   = "follow"
}

module "FollowCORS" {
  source      = "./cors"
  api_id      = aws_api_gateway_rest_api.api.id
  resource_id = aws_api_gateway_resource.follow.id
  methods     = ["DELETE", "OPTIONS", "PUT"]
}
All Lambda functions return the same response headers as OPTIONS:
// Lambda for this endpoint/method
const AWS = require('aws-sdk');
// Set up DynamoDB DocumentClient

exports.handler = async (event) => {
  let body = {};
  let statusCode = 200;
  const headers = {
    'Access-Control-Allow-Headers':
      'Accept,Authorization,Content-Type,X-Amz-Date,X-Amz-Security-Token',
    'Access-Control-Allow-Methods': 'DELETE,OPTIONS,PUT',
    'Access-Control-Allow-Origin': '*', // temporary update as per suggestion
    'Content-Type': 'application/json',
    Vary: 'Origin',
  };

  // original `Access-Control-Allow-Origin` setting
  // const allowedOrigins = ['http://localhost:3000'];
  // const origin = event.headers.origin || '';
  // if (allowedOrigins.includes(origin)) {
  //   headers['Access-Control-Allow-Origin'] = origin;
  // }

  console.log('Event:\n', event);

  // Check/Get `cognito:username` from event

  try {
    // DELETE operation on DynamoDB table
    body.isFollowing = false;
  } catch (err) {
    console.error('Error:\n', err);
    statusCode = 500;
    body = { error: { message: err.message } };
  }

  return { statusCode, headers, body: JSON.stringify(body) };
};
I'm able to successfully interact with all endpoints via Postman and can make GET requests from my Next.js app (useSWR, fetch, axios all OK).
The problem is I can't make any other requests (DELETE, PATCH, POST, PUT) with axios or fetch:
axios
  .delete(`${API_BASE_URL}/users/testuser/follow`, {
    headers: {
      Authorization: `Bearer ${id_token}`,
    },
  })
  .then((response) => {
    console.log(response);
  })
  .catch((error) => {
    console.log(error);
  });

fetch(`${API_BASE_URL}/users/testuser/follow`, {
  method: 'DELETE',
  headers: {
    Authorization: `Bearer ${id_token}`,
  },
})
  .then((res) => res.json())
  .then((data) => {
    console.log(data);
  });
where API_BASE_URL is https://{API_ID}.execute-api.{REGION}.amazonaws.com/{STAGE}. The item I'm attempting to DELETE does exist (it was created via Postman, since the PUT request also fails with the same error).
I get the following error:
Access to fetch at 'https://{API_BASE_URL}/users/testuser/follow' from origin 'http://localhost:3000' has been blocked by CORS policy: Response to preflight request doesn't pass access control check: No 'Access-Control-Allow-Origin' header is present on the requested resource. If an opaque response serves your needs, set the request's mode to 'no-cors' to fetch the resource with CORS disabled.
I also get TypeError: Failed to fetch when the fetch request fails.
The calls made via axios and fetch don't seem to even hit the API, because no CloudWatch logs get created.
Also, the axios request headers shown in the Network tab only have Referer: http://localhost:3000/ but no Origin: http://localhost:3000, and no response headers are shown.
As suggested in the comments, I tested the OPTIONS method for this endpoint with Postman, but get the following error:
... not a valid key=value pair (missing equal-sign) in Authorization header ...
I know this error appears when there are other issues (e.g. wrong method used), so I'm guessing something is wrong with my preflight config.
The same error occurs for all other endpoints and methods.
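(One common cause worth ruling out here: API Gateway's own error responses, e.g. 401/403 from a Cognito authorizer or a missing route, don't carry CORS headers by default, so the browser reports them as CORS failures even though the real problem is elsewhere. A gateway response can attach the headers to every gateway-generated 4XX; a minimal sketch, assuming the aws_api_gateway_rest_api.api from the module setup above:

resource "aws_api_gateway_gateway_response" "default_4xx" {
  rest_api_id   = aws_api_gateway_rest_api.api.id
  response_type = "DEFAULT_4XX"

  # Gateway-generated errors (authorizer rejections, missing routes) do not
  # include CORS headers by default; this adds them to all default 4XX
  # responses so the browser can surface the underlying error.
  response_parameters = {
    "gatewayresponse.header.Access-Control-Allow-Origin"  = "'http://localhost:3000'"
    "gatewayresponse.header.Access-Control-Allow-Headers" = "'Authorization,Content-Type'"
  }
})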
What's blocking these requests from being made?
(Screenshot: Postman response headers for the DELETE request)
Terraform v1.2.7 on windows_amd64
hashicorp/aws version ~> 4.26

To anyone else experiencing this issue (and using Terraform), before you spend days headbanging your keyboard, did you try turning it off and on again? terraform destroy, then terraform apply.
I did also make modifications to my code, so I can't provide the satisfaction of pointing to a line number and saying that was the issue. It seems the changes weren't being deployed properly until I "turned it off, then on again".
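If you hit the same wall, a less drastic option than destroying everything is forcing API Gateway to redeploy: aws_api_gateway_deployment only creates a new deployment when something in its triggers map changes. A minimal sketch of the provider-documented pattern, using resource names from the condensed code below; exactly which resources to hash is an assumption, so extend the list as needed:

resource "aws_api_gateway_deployment" "test_deployment" {
  rest_api_id = aws_api_gateway_rest_api.test_api.id

  # Redeploy whenever any of the listed configuration changes.
  # The trigger list here is illustrative, not exhaustive.
  triggers = {
    redeployment = sha1(jsonencode([
      aws_api_gateway_integration.test_integration.id,
      aws_api_gateway_integration_response.cors_integration_response.id,
    ]))
  }

  lifecycle {
    create_before_destroy = true
  }
}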
Alas, the following is a condensed version of my updated code with the changes indicated:
Terraform
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.26"
    }
  }
}

provider "aws" {
  profile = "default"
  region  = "ca-central-1"
}

# Cognito
# ...

# Rest API
resource "aws_api_gateway_rest_api" "test_api" {
  name        = "test_api"
  description = "Test REST API"
  endpoint_configuration {
    types = ["REGIONAL"]
  }
}

resource "aws_api_gateway_authorizer" "authorizer" {
  name            = "CognitoUserPoolAuthorizer"
  rest_api_id     = aws_api_gateway_rest_api.test_api.id
  identity_source = "method.request.header.Authorization"
  type            = "COGNITO_USER_POOLS"
  provider_arns   = [aws_cognito_user_pool.user_pool.arn]
}

resource "aws_api_gateway_resource" "test_resource" {
  rest_api_id = aws_api_gateway_rest_api.test_api.id
  parent_id   = aws_api_gateway_rest_api.test_api.root_resource_id
  path_part   = "test"
}

# Lambda Proxy
resource "aws_api_gateway_method" "test_method" {
  rest_api_id   = aws_api_gateway_rest_api.test_api.id
  resource_id   = aws_api_gateway_resource.test_resource.id
  http_method   = "POST"
  authorization = "COGNITO_USER_POOLS"
  authorizer_id = aws_api_gateway_authorizer.authorizer.id
}

resource "aws_api_gateway_method_response" "test_method_response" {
  rest_api_id = aws_api_gateway_rest_api.test_api.id
  resource_id = aws_api_gateway_resource.test_resource.id
  http_method = aws_api_gateway_method.test_method.http_method
  # -------------------------------------------------------------------------
  # 1. Changed value from `string` to `int` ("200" -> `200`)
  # -------------------------------------------------------------------------
  status_code = 200
  response_parameters = {
    "method.response.header.Access-Control-Allow-Origin" = true
  }
  depends_on = [aws_api_gateway_method.test_method]
}

resource "aws_lambda_function" "test_handler" {
  function_name    = "test_handler"
  handler          = "index.handler"
  runtime          = "nodejs16.x"
  filename         = "../lambda/index.zip"
  source_code_hash = filebase64sha256("../lambda/index.zip")
  role             = aws_iam_role.test_lambda_role.arn
  memory_size      = 128
  timeout          = 5
}

resource "aws_iam_role" "test_lambda_role" {
  name = "test_lambda_role"
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Action = "sts:AssumeRole"
      Effect = "Allow"
      Sid    = ""
      Principal = {
        Service = "lambda.amazonaws.com"
      }
    }]
  })
}

resource "aws_iam_policy" "test_lambda_policy" {
  name = "test_lambda_policy"
  path = "/"
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect   = "Allow"
        Action   = ["logs:CreateLogStream", "logs:CreateLogGroup", "logs:PutLogEvents"]
        Resource = "arn:aws:logs:ca-central-1:*:*"
      }
    ]
  })
}

resource "aws_iam_role_policy_attachment" "lambda_role_policy" {
  role       = aws_iam_role.test_lambda_role.name
  policy_arn = aws_iam_policy.test_lambda_policy.arn
}

resource "aws_api_gateway_integration" "test_integration" {
  rest_api_id             = aws_api_gateway_rest_api.test_api.id
  resource_id             = aws_api_gateway_method.test_method.resource_id
  http_method             = aws_api_gateway_method.test_method.http_method
  integration_http_method = "POST"
  type                    = "AWS_PROXY"
  uri                     = aws_lambda_function.test_handler.invoke_arn
  depends_on              = [aws_api_gateway_method.test_method, aws_lambda_function.test_handler]
}

resource "aws_lambda_permission" "test_permission" {
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.test_handler.function_name
  principal     = "apigateway.amazonaws.com"
  # -------------------------------------------------------------------------
  # 2. Updated `source_arn` to be more specific
  #    previously `${aws_api_gateway_rest_api.test_api.execution_arn}/*/*/*`
  #    the remaining `*` enables permissions for all API stages
  # -------------------------------------------------------------------------
  source_arn = "${aws_api_gateway_rest_api.test_api.execution_arn}/*/${aws_api_gateway_method.test_method.http_method}${aws_api_gateway_resource.test_resource.path}"
}

# CORS
resource "aws_api_gateway_method" "cors_method" {
  rest_api_id   = aws_api_gateway_rest_api.test_api.id
  resource_id   = aws_api_gateway_resource.test_resource.id
  http_method   = "OPTIONS"
  authorization = "NONE"
}

resource "aws_api_gateway_method_response" "cors_method_response" {
  rest_api_id = aws_api_gateway_rest_api.test_api.id
  resource_id = aws_api_gateway_resource.test_resource.id
  http_method = aws_api_gateway_method.cors_method.http_method
  # -------------------------------------------------------------------------
  # 3. Changed `status_code` value from `string` to `int` ("200" -> `200`)
  # -------------------------------------------------------------------------
  status_code = 200
  response_models = {
    "application/json" = "Empty"
  }
  response_parameters = {
    # -----------------------------------------------------------------------
    # 4. Added Access-Control-Allow-Credentials
    #    calls with `Authorization` header fail without this
    # -----------------------------------------------------------------------
    "method.response.header.Access-Control-Allow-Credentials" = true
    "method.response.header.Access-Control-Allow-Headers"     = true
    "method.response.header.Access-Control-Allow-Methods"     = true
    "method.response.header.Access-Control-Allow-Origin"      = true
  }
  depends_on = [aws_api_gateway_method.cors_method]
}

resource "aws_api_gateway_integration" "cors_integration" {
  rest_api_id = aws_api_gateway_rest_api.test_api.id
  resource_id = aws_api_gateway_resource.test_resource.id
  http_method = aws_api_gateway_method.cors_method.http_method
  type        = "MOCK"
  # -------------------------------------------------------------------------
  # 5. Added `request_templates`
  # -------------------------------------------------------------------------
  request_templates = {
    "application/json" = "{ \"statusCode\": 200 }"
  }
  depends_on = [aws_api_gateway_method.cors_method]
}

resource "aws_api_gateway_integration_response" "cors_integration_response" {
  rest_api_id = aws_api_gateway_rest_api.test_api.id
  resource_id = aws_api_gateway_resource.test_resource.id
  http_method = aws_api_gateway_method.cors_method.http_method
  status_code = aws_api_gateway_method_response.cors_method_response.status_code
  # careful with double/single quotes here
  response_parameters = {
    # -----------------------------------------------------------------------
    # 6. Added Access-Control-Allow-Credentials
    #    calls with `Authorization` header fail without this
    # -----------------------------------------------------------------------
    "method.response.header.Access-Control-Allow-Credentials" = "'true'"
    # -----------------------------------------------------------------------
    # 7. Simplified `Access-Control-Allow-Headers` to `Authorization,Content-Type`
    #    for methods with `COGNITO_USER_POOLS` authorization and `Content-Type`
    #    only for methods without authorization
    # -----------------------------------------------------------------------
    "method.response.header.Access-Control-Allow-Headers" = "'Authorization,Content-Type'"
    "method.response.header.Access-Control-Allow-Methods" = "'OPTIONS,POST'"
    "method.response.header.Access-Control-Allow-Origin"  = "'${join(",", ["http://localhost:3000"])}'"
  }
  depends_on = [aws_api_gateway_method_response.cors_method_response]
}

resource "aws_api_gateway_deployment" "test_deployment" {
  rest_api_id = aws_api_gateway_rest_api.test_api.id
  depends_on  = [aws_api_gateway_integration.test_integration]
}

resource "aws_api_gateway_stage" "api_stage" {
  deployment_id = aws_api_gateway_deployment.test_deployment.id
  rest_api_id   = aws_api_gateway_rest_api.test_api.id
  stage_name    = "dev"
}

# CloudWatch
# ...
Lambda
exports.handler = async (event) => {
  let body = {};
  let statusCode = 200;

  // 8. Return same `Access-Control-Allow-Headers` as `Integration Response`
  const headers = {
    'Access-Control-Allow-Credentials': true,
    'Access-Control-Allow-Headers': 'Authorization,Content-Type',
    'Access-Control-Allow-Methods': 'OPTIONS,POST',
    'Content-Type': 'application/json',
    Vary: 'Origin',
  };

  const allowedOrigins = ['http://localhost:3000'];
  const origin = event.headers?.origin || '';
  if (allowedOrigins.includes(origin)) {
    headers['Access-Control-Allow-Origin'] = origin;
  }

  console.log('Event:\n', event);

  const currentUsername = event.requestContext.authorizer.claims['cognito:username'] || '';
  if (!currentUsername) {
    statusCode = 401;
    body = JSON.stringify({ error: { message: 'Unauthorized' } });
    return { statusCode, headers, body };
  }

  const payload = JSON.parse(event.body);
  if (!payload.test) {
    statusCode = 400;
    body = JSON.stringify({ error: { message: 'Invalid input' } });
    return { statusCode, headers, body };
  }

  body.greeting = `Hello from the other side, ${currentUsername}`;
  body.test = payload.test;
  return { statusCode, headers, body: JSON.stringify(body) };
};
Prior to the issue being fixed, I noticed the API endpoint for the Lambda's API Gateway trigger (Using the AWS Console: Lambda > Functions > function_name > Configuration > Triggers) had the following error:
The API with ID API_ID doesn’t include a resource with path /* having an integration arn:aws:lambda:REGION:ACCOUNT_ID:function:FUNCTION_NAME on the ANY method.
whereas now there is a clickable link with the complete resource path (as per the updated aws_lambda_permission Terraform config). I'm not sure how much this was contributing to the problem.
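For reference, the permission is matched against the pattern {execution_arn}/{stage}/{http_method}/{resource_path}, so a middle ground between the old catch-all and the fully specific ARN above is scoping to a single stage. A sketch; the dev stage name here is an assumption:

resource "aws_lambda_permission" "stage_scoped_permission" {
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.test_handler.function_name
  principal     = "apigateway.amazonaws.com"
  # Any method on any path, but only through the (hypothetical) dev stage
  source_arn = "${aws_api_gateway_rest_api.test_api.execution_arn}/dev/*/*"
}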
May the CORS be with you.

You need to ensure the "Access-Control-Allow-Origin" field in the Lambda response headers matches the OPTIONS response. It could be worth setting them both to * temporarily to check there are no other issues...
Lambda response header:
'headers': {
  "Content-Type": "application/json",
  "Access-Control-Allow-Origin": "*"
},

OPTIONS response:

{
  "Access-Control-Allow-Headers": ["Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token"],
  "Access-Control-Allow-Methods": ["DELETE,OPTIONS,PUT"],
  "Access-Control-Allow-Origin": ["*"],
  "Content-Type": ["application/json"]
}
Also, it would be interesting to see the response you get hitting the endpoint from Postman. It's possible that the gateway's integration response isn't passing through the Lambda header values and needs to be configured.
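One caveat on that last point: with type = "AWS_PROXY", as in the question, API Gateway passes the Lambda's statusCode, headers, and body through unchanged, so there is no integration response to configure. Header mapping only applies to non-proxy (type = "AWS") integrations, along these lines; a sketch, where the resource names and variables are placeholders:

resource "aws_api_gateway_integration_response" "non_proxy_example" {
  rest_api_id = var.api_id
  resource_id = var.resource_id
  http_method = "POST"
  status_code = "200"
  response_parameters = {
    # Copies the header from the Lambda (integration) response onto the
    # method response; the header must also be declared with `= true` in
    # the matching aws_api_gateway_method_response.
    "method.response.header.Access-Control-Allow-Origin" = "integration.response.header.Access-Control-Allow-Origin"
  }
}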

Related

Getting Hashicorp Provider Error while creating aks cluster using terraform

I am getting an error while creating an AKS cluster using Terraform.
Error:
Error: Failed to query available provider packages
Could not retrieve the list of available versions for provider hashicorp/file: provider registry registry.terraform.io does not have a provider named
registry.terraform.io/hashicorp/file
All modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which modules are currently depending on hashicorp/file, run the following command:
terraform providers
Above is the error I am facing. I have written the Terraform code as shown below.
provider.tf:
============
provider "azurerm" {
features {}
}
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "2.39.0"
}
}
}
terraform.tfvars:
=================
resource_group_name = "a0474899701"
location            = "CentralUS"
cluster_name        = "aks01"
kubernetes_version  = "1.24.4"
system_node_count   = 2
user_node_count     = 1
spot_node_count     = 2
acr_name            = "devops_acr_tf"
aks_network_plugin  = "kubenet"
client_id           = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
client_secret       = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
main.tf:
========
# Create a Resource Group
resource "azurerm_resource_group" "aks-rg" {
  name     = var.resource_group_name
  location = var.location
}

# Create an ACR instance
resource "azurerm_container_registry" "acr" {
  name                = var.acr_name
  resource_group_name = azurerm_resource_group.aks-rg.name
  location            = var.location
  sku                 = "Standard"
  admin_enabled       = false
}

# Create a role assignment to allow AKS to access ACR
resource "azurerm_role_assignment" "role_acrpull" {
  scope                = azurerm_container_registry.acr.id
  role_definition_name = "AcrPull"
  # principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity.0.object_id
  principal_id                     = azurerm_kubernetes_cluster.aks.kubelet_identity.0.client_id
  skip_service_principal_aad_check = true
}

# Create a Kubernetes secret to hold the ACR credentials
# It holds the ACR credentials in a Docker config JSON format
resource "kubernetes_secret" "acr_credentials" {
  metadata {
    name = "acr-credentials"
  }
  data = {
    ".dockerconfigjson" = azurerm_container_registry.acr.docker_config_json
  }
}

# Private Key Creation
resource "tls_private_key" "aks_ssh_key" {
  algorithm = "RSA"
}

resource "tls_public_key" "aks_ssh_key" {
  private_key_pem = tls_private_key.aks_ssh_key.private_key_pem
}

resource "file" "private_key" {
  content  = tls_private_key.aks_ssh_key.private_key_pem
  filename = "aks_private_key.pem"
}

# virtual network (aks_vnet) is created in the same resource group
resource "azurerm_virtual_network" "aks_vnet" {
  name = "${var.resource_group_name}-vnet01"
  # address_space = ["10.0.0.0/16"]
  address_space       = ["10.172.144.0/26"]
  location            = azurerm_resource_group.aks_rg.location
  resource_group_name = azurerm_resource_group.aks_rg.name
}

# subnet (aks_subnet) is created within the virtual network
resource "azurerm_subnet" "aks_subnet" {
  name                 = "${var.resource_group_name}-vnet01-subnet01"
  resource_group_name  = azurerm_resource_group.aks_rg.name
  virtual_network_name = azurerm_virtual_network.aks_vnet.name
  # address_prefix = "10.0.1.0/24"
  address_prefix = "10.172.144.0/27"
}

resource "azurerm_network_security_group" "azure-sg" {
  name                = "${var.resource_group_name}-nsg01"
  location            = azurerm_resource_group.aks_rg.location
  resource_group_name = azurerm_resource_group.aks_rg.name
  security_rule {
    name                       = "allow-ssh"
    priority                   = 100
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "22"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
}

resource "azurerm_kubernetes_cluster" "aks" {
  name                = var.cluster_name
  kubernetes_version  = var.kubernetes_version
  location            = var.location
  resource_group_name = azurerm_resource_group.aks-rg.name
  security_group_name = azurerm_network_security_group.azure-sg.name
  dns_prefix          = var.cluster_name

  default_node_pool {
    name                  = "system"
    node_count            = var.system_node_count
    vm_size               = "Standard_E4as_v4"
    os_disk_size_gb       = 20
    os_disk_type          = "Ephemeral"
    vnet_subnet_id        = azurerm_subnet.aks_subnet.id
    os_type               = "Linux"
    node_image_version    = "AKSUbuntu-1804gen2containerd-2023.01.10"
    enable_node_public_ip = false
    enable_auto_scaling   = false
  }

  additional_node_pools {
    name            = "user"
    node_count      = var.user_node_count
    vm_size         = "Standard_E8as_v4"
    os_disk_size_gb = 20
    os_disk_type    = "Ephemeral"
    vnet_subnet_id  = azurerm_subnet.aks_subnet.id
    type            = "User"
    # os_type = "RedHat"
    os_type               = "Linux"
    node_image_version    = "AKSUbuntu-1804gen2containerd-2023.01.10"
    enable_node_public_ip = false
    enable_auto_scaling   = false
  }

  additional_node_pools {
    name            = "spot"
    node_count      = var.spot_node_count
    vm_size         = "Standard_D2s_v3"
    os_disk_size_gb = 20
    os_disk_type    = "Ephemeral"
    vnet_subnet_id  = azurerm_subnet.aks_subnet.id
    type            = "User"
    # os_type = "RedHat"
    os_type               = "Linux"
    node_image_version    = "AKSUbuntu-1804gen2containerd-2023.01.10"
    max_price             = 0.5
    enable_node_public_ip = false
    enable_auto_scaling   = false
    eviction_policy       = "Spot"
    taints                = ["kubernetes.azure.com/scalesetpriority=spot:NoSchedule"]
    labels = {
      "kubernetes.azure.com/scalesetpriority" = "spot"
    }
  }

  kubernetes_cluster_config {
    max_pods_per_node = "110"
  }

  identity {
    type = "SystemAssigned"
  }

  linux_profile {
    admin_username = "azureuser"
    ssh_key {
      key_data = tls_public_key.aks_ssh_key.public_key_openssh
    }
  }

  network_profile {
    pod_cidr           = "172.32.0.0/19"
    service_cidr       = "172.32.0.0/19"
    load_balancer_sku  = "Standard"
    network_plugin     = var.aks_network_plugin
    dns_service_ip     = "172.32.0.10"
    docker_bridge_cidr = "172.34.0.1/16"
  }

  service_principal {
    client_id     = var.client_id
    client_secret = var.client_secret
  }

  tags = {
    Environment = "Development"
  }
}

# ACR can be attached to the AKS cluster using the "azurerm_kubernetes_cluster_container_registry_config" resource type
resource "azurerm_kubernetes_cluster_container_registry_config" "acr_config" {
  cluster_name   = azurerm_kubernetes_cluster.aks.name
  registry_id    = azurerm_container_registry.acr.id
  namespace      = "aks"
  default_action = "Allow"
}
Above is my code where I am facing the error. Even after changing my provider.tf I still face the same issue. Can anyone please tell me how to solve this error?
Thanks
I tried to reproduce the same in my environment to create an AKS cluster using Terraform.
Kindly use the below Terraform code to create an AKS cluster.
Terraform Code:
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "venkatesh" {
name = "venkat-resources"
location = "West Europe"
}
resource "azurerm_container_registry" "venkatreg" {
name = "Testcontainerregistery"
resource_group_name = azurerm_resource_group.venkatesh.name
location = azurerm_resource_group.venkatesh.location
sku = "Premium"
}
resource "azurerm_kubernetes_cluster" "venkatcluster" {
name = "example-aks1"
location = azurerm_resource_group.venkatesh.location
resource_group_name = azurerm_resource_group.venkatesh.name
dns_prefix = "exampleaks1"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
}
identity {
type = "SystemAssigned"
}
tags = {
Environment = "Production"
}
}
resource "azurerm_role_assignment" "example" {
principal_id = azurerm_kubernetes_cluster.venkatcluster.kubelet_identity[0].object_id
role_definition_name = "AcrPull"
scope = azurerm_container_registry.venkatreg.id
skip_service_principal_aad_check = true
}
Terraform apply:
Once the code ran, the resources were created successfully.
Reference:
Create a Kubernetes cluster with Azure Kubernetes Service using Terraform.
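As for the error message itself: Terraform infers a provider from each resource type's prefix, so the resource "file" block in the question is looked up as hashicorp/file, which doesn't exist in the registry. The usual fix is the local_file resource from the hashicorp/local provider (and note that tls_public_key is a data source, not a resource). A sketch:

terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "2.39.0"
    }
    local = {
      source = "hashicorp/local"
    }
    tls = {
      source = "hashicorp/tls"
    }
  }
}

# Replaces the invalid `resource "file"` block from the question
resource "local_file" "private_key" {
  content  = tls_private_key.aks_ssh_key.private_key_pem
  filename = "aks_private_key.pem"
}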

HTTP API Gateway - How to map authorizer to route in Terraform?

In console, you have to "attach" the authorizer to a route. How is this achieved in TF?
resource "aws_apigatewayv2_api" "service_http_api" {
name = var.name
description = var.description
tags = var.tags
protocol_type = "HTTP"
}
resource "aws_apigatewayv2_authorizer" "authorizer" {
api_id = aws_apigatewayv2_api.service_http_api.id
authorizer_type = "JWT"
identity_sources = ["$request.header.Authorization"]
name = "AuthName"
jwt_configuration {
audience = [var.open_id_audience]
issuer = var.open_id_issuer
}
}
resource "aws_apigatewayv2_integration" "function" {
api_id = aws_apigatewayv2_api.service_http_api.id
integration_type = "AWS_PROXY"
connection_type = "INTERNET"
integration_method = "POST"
integration_uri = var.function_arn
payload_format_version = "2.0"
}
resource "aws_apigatewayv2_route" "route" {
api_id = aws_apigatewayv2_api.service_http_api.id
authorizer_id = aws_apigatewayv2_authorizer.authorizer.id
target = "integrations/${aws_apigatewayv2_integration.function.id}"
authorization_type = "JWT"
route_key = "/route/"
}
resource "aws_apigatewayv2_route" "route_integration" {
api_id = aws_apigatewayv2_api.service_http_api.id
authorization_type = "AWS_IAM"
route_key = "/route/"
target = "integrations/${aws_apigatewayv2_integration.function.id}"
}
resource "aws_apigatewayv2_api" "service_http_api" {
name = var.name
description = var.description
tags = var.tags
protocol_type = "HTTP"
}
resource "aws_apigatewayv2_authorizer" "authorizer" {
api_id = aws_apigatewayv2_api.service_http_api.id
authorizer_type = "JWT"
identity_sources = ["$request.header.Authorization"]
name = "AuthName"
jwt_configuration {
audience = [var.open_id_audience]
issuer = var.open_id_issuer
}
}
resource "aws_apigatewayv2_integration" "function" {
api_id = aws_apigatewayv2_api.service_http_api.id
integration_type = "AWS_PROXY"
connection_type = "INTERNET"
integration_method = "POST"
integration_uri = var.function_arn
payload_format_version = "2.0"
}
resource "aws_apigatewayv2_route" "route" {
api_id = aws_apigatewayv2_api.service_http_api.id
authorizer_id = aws_apigatewayv2_authorizer.authorizer.id
target = "integrations/${aws_apigatewayv2_integration.function.id}"
authorization_type = "JWT"
route_key = "/route/"
}

Github repository webhook error when creating AWS CodePipeline using Terraform

I am receiving the following error when creating a Github repository webhook as part of CodePipeline.
github_repository_webhook.github_hook: Creating...
Error: POST https://api.github.com/repos/myrepoxx/static-web-example/hooks: 404 Not Found []
I tried changing the URL and repository with no success.
hooks.tf
resource "aws_codepipeline_webhook" "codepipeline_webhook" {
authentication = "GITHUB_HMAC"
name = "codepipeline-webhook"
target_action = "Source"
target_pipeline = aws_codepipeline.static_web_pipeline.name
authentication_configuration {
secret_token = random_string.github_secret.result
}
filter {
json_path = "$.ref"
match_equals = "refs/heads/{Branch}"
}
tags = {}
}
resource "github_repository_webhook" "github_hook" {
repository = var.repository_name
events = ["push"]
configuration {
url = aws_codepipeline_webhook.codepipeline_webhook.url
insecure_ssl = "0"
content_type = "json"
secret = random_string.github_secret.result
}
}
resource "random_string" "github_secret" {
length = 99
special = false
}
var.tf
variable "env" {
default = "dev"
}
variable "region" {
default = "eu-west-1"
}
variable "repository_branch" {
default = "master"
}
variable "repository_owner" {
default = "myrepoxx"
}
variable "repository_name" {
default = "static-web-example"
}
variable "static_web_bucket_name" {
default = "myrepoxx-static-web-example-bucket"
}
variable "artifacts_bucket_name" {
default = "myrepoxx-static-web-example-artifacts"
}
variable "github_token" {
}
output "web_public_url" {
value = aws_s3_bucket.static_web_bucket.website_endpoint
}
According to the Terraform docs, from Terraform 0.13 onwards the GitHub provider source should be integrations/github:
terraform {
  required_providers {
    github = {
      source  = "integrations/github"
      version = "4.x.x"
    }
  }
}
Then you can configure the provider with your OAuth/personal access token if you need it:
provider "github" {
owner = var.repo_owner
token = var.github_token
}

Can't create secrets in kubernetes with terraform cloud

I am trying to create a secret within my Kubernetes cluster using Terraform Cloud.
I can create the cluster with no problems, but problems arise when I try to inject a secret in the cluster.
Here is a simplified version of my terraform manifest:
terraform {
  backend "remote" {
    organization = "my-org"
    // Workspaces separate deployment envs (like prod, stage, or UK, Italy)
    workspaces {
      name = "my-workspace-name"
    }
  }
}

resource "google_container_cluster" "demo-k8s-cluster" {
  name               = "demo-cluster"
  location           = var.region
  initial_node_count = 1
  project            = var.project-id
  master_auth {
    username = ""
    password = ""
    client_certificate_config {
      issue_client_certificate = false
    }
  }
  node_config {
    oauth_scopes = [
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
    ]
    // service_account = var.service-account
    metadata = {
      disable-legacy-endpoints = "true"
    }
  }
  timeouts {
    create = "30m"
    update = "40m"
  }
}

provider "kubernetes" {
  host                   = google_container_cluster.demo-k8s-cluster.endpoint
  username               = google_container_cluster.demo-k8s-cluster.master_auth.0.username
  password               = google_container_cluster.demo-k8s-cluster.master_auth.0.password
  client_certificate     = base64decode(google_container_cluster.demo-k8s-cluster.master_auth.0.client_certificate)
  client_key             = base64decode(google_container_cluster.demo-k8s-cluster.master_auth.0.client_key)
  cluster_ca_certificate = base64decode(google_container_cluster.demo-k8s-cluster.master_auth.0.cluster_ca_certificate)
  load_config_file       = "false"
}

resource "kubernetes_secret" "cloudsql-db-credentials" {
  metadata {
    name = "cloudsql-instance-credentials-test"
  }
  data = {
    "stack-creds.json" = var.service-account
  }
}
The plan works fine, but I get the following error at the apply stage:
Error: secrets is forbidden: User "system:anonymous" cannot create resource "secrets" in API group "" in the namespace "default"
on infrastructure.tf line 149, in resource "kubernetes_secret" "cloudsql-db-credentials":
149: resource "kubernetes_secret" "cloudsql-db-credentials" {
As per @mario's comment, it turns out Terraform Cloud can't get the right identity and can't connect to the cluster to inject the secret. Instead of using Terraform Cloud I opted for the GCS backend and managed to get it working. The following configuration works:
terraform {
  backend "gcs" {
    bucket = "infrastructure-state-bucket"
    prefix = "test/so_simple2"
  }
}

// The project-id variable contains the project id to use.
variable "project-id" {
  type = string
}

variable "region" {
  type = string
}

variable "cluster-name" {
  type = string
}

provider "google" {
  project = var.project-id
  region  = var.region
}

provider "random" {}

resource "random_id" "id" {
  byte_length = 4
  prefix      = "${var.cluster-name}-"
}

resource "google_container_cluster" "cluster" {
  name               = random_id.id.hex
  location           = var.region
  initial_node_count = 1
  project            = var.project-id
}

provider "kubernetes" {
  host                   = google_container_cluster.cluster.endpoint
  username               = google_container_cluster.cluster.master_auth.0.username
  password               = google_container_cluster.cluster.master_auth.0.password
  client_certificate     = base64decode(google_container_cluster.cluster.master_auth.0.client_certificate)
  client_key             = base64decode(google_container_cluster.cluster.master_auth.0.client_key)
  cluster_ca_certificate = base64decode(google_container_cluster.cluster.master_auth.0.cluster_ca_certificate)
  // This is a deal breaker: if this is set to false, I get the same error.
  // load_config_file = "false"
}

resource "kubernetes_secret" "example" {
  metadata {
    name = "basic-auth"
  }
  data = {
    username = "admin"
    password = "P4ssw0rd"
  }
  type = "kubernetes.io/basic-auth"
}
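A further note: the system:anonymous error usually means the provider fell back to unauthenticated access. An alternative that avoids the legacy basic-auth/client-certificate path entirely (and can also work from Terraform Cloud) is token authentication via the google_client_config data source. A sketch against the cluster above:

data "google_client_config" "default" {}

provider "kubernetes" {
  # Authenticate with an OAuth access token from the active Google
  # credentials instead of basic auth or client certificates.
  host  = "https://${google_container_cluster.cluster.endpoint}"
  token = data.google_client_config.default.access_token
  cluster_ca_certificate = base64decode(
    google_container_cluster.cluster.master_auth.0.cluster_ca_certificate
  )
}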

terraform azurerm - cannot destroy public ip

New to Terraform, so I'm hoping this is an easy issue. I'm creating some resources in Azure and deploying a simple Flask application to AKS. Creation works fine with terraform apply: I can see that Azure is provisioned correctly and I can hit the Flask app.
When I try to run terraform destroy I get the error "StatusCode=400...In order to delete the public IP, disassociate/detach the Public IP address from the resource."
Main.tf
variable "subscription_id" {}
variable "client_id" {}
variable "client_secret" {}
variable "tenant_id" {}
provider "azurerm" {
version = "=1.28.0"
tenant_id = "${var.tenant_id}"
subscription_id = "${var.subscription_id}"
}
resource "azurerm_resource_group" "aks" {
name = "${var.name_prefix}"
location = "${var.location}"
}
resource "azurerm_kubernetes_cluster" "k8s" {
name = "${var.name_prefix}-aks"
kubernetes_version = "${var.kubernetes_version}"
location = "${azurerm_resource_group.aks.location}"
resource_group_name = "${azurerm_resource_group.aks.name}"
dns_prefix = "AKS-${var.dns_prefix}"
agent_pool_profile {
name = "${var.node_pool_name}"
count = "${var.node_pool_size}"
vm_size = "${var.node_pool_vmsize}"
os_type = "${var.node_pool_os}"
os_disk_size_gb = 30
}
service_principal {
client_id = "${var.client_id}"
client_secret = "${var.client_secret}"
}
tags = {
environment = "${var.env_tag}"
}
}
provider "helm" {
install_tiller = true
kubernetes {
host = "${azurerm_kubernetes_cluster.k8s.kube_config.0.host}"
client_certificate = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate)}"
client_key = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_key)}"
cluster_ca_certificate = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate)}"
}
}
# Create Static Public IP Address to be used by Nginx Ingress
resource "azurerm_public_ip" "nginx_ingress" {
name = "nginx-ingress-public-ip"
location = "${azurerm_kubernetes_cluster.k8s.location}"
resource_group_name = "${azurerm_kubernetes_cluster.k8s.node_resource_group}"
allocation_method = "Static"
domain_name_label = "${var.name_prefix}"
}
# Add Kubernetes Stable Helm charts repo
data "helm_repository" "stable" {
name = "stable"
url = "https://kubernetes-charts.storage.googleapis.com"
}
# Install Nginx Ingress using Helm Chart
resource "helm_release" "nginx_ingress" {
name = "nginx-ingress"
repository = "${data.helm_repository.stable.metadata.0.name}"
chart = "nginx-ingress"
set {
name = "rbac.create"
value = "false"
}
set {
name = "controller.service.externalTrafficPolicy"
value = "Local"
}
set {
name = "controller.service.loadBalancerIP"
value = "${azurerm_public_ip.nginx_ingress.ip_address}"
}
}
I'm also deploying my Kubernetes resources in this file, k8s.tf:
provider "kubernetes" {
host = "${azurerm_kubernetes_cluster.k8s.kube_config.0.host}"
username = "${azurerm_kubernetes_cluster.k8s.kube_config.0.username}"
password = "${azurerm_kubernetes_cluster.k8s.kube_config.0.password}"
client_certificate = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate)}"
client_key = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_key)}"
cluster_ca_certificate = "${base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate)}"
}
resource "kubernetes_deployment" "flask-api-deployment" {
metadata {
name = "flask-api-deployment"
}
spec {
replicas = 2
selector {
match_labels {
component = "api"
}
}
template {
metadata {
labels = {
component = "api"
}
}
spec {
container {
image = "xxx.azurecr.io/sampleflask:0.1.0"
name = "flask-api"
port {
container_port = 5000
}
}
}
}
}
}
resource "kubernetes_service" "api-cluster-ip-service" {
metadata {
name = "flask-api-cluster-ip-service"
}
spec {
selector {
component = "api"
}
port {
port = 5000
target_port = 5000
}
}
}
resource "kubernetes_ingress" "flask-ingress-service" {
metadata {
name = "flask-ingress-service"
}
spec {
backend {
service_name = "flask-api-cluster-ip-service"
service_port = 5000
}
}
}
For your issue, this is a problem with the order of the resources. When you create the nginx ingress with the public IP, the public IP must be created first. But when you delete the public IP, it's still in use by the nginx ingress, which causes the error.
The solution is to detach the public IP from the resource that uses it, then destroy the resource with Terraform. You can take a look at the explanation in the issue.
The user @4c74356b41 is right, but to give more information, assuming a config like this:
resource "azurerm_kubernetes_cluster" "k8s" {
name = "aks-e2x-nuffield-uat"
resource_group_name = azurerm_resource_group.core_rg.name
location = azurerm_resource_group.core_rg.location
dns_prefix = "aks-e2x-nuffield-uat-dns"
kubernetes_version = var.k8s_version
# NOTE currently only a single node pool, default, is configured
private_cluster_enabled = true
...
network_profile {
network_plugin = "kubenet"
load_balancer_sku = "standard"
service_cidr = var.k8s_service_subnet
pod_cidr = var.k8s_pod_subnet
docker_bridge_cidr = "172.17.0.1/16"
dns_service_ip = "40.0.8.10" # within the service subnet
}
}
Where the load_balancer_sku is set to standard, you can access the public IP to be used elsewhere like this:
data "azurerm_public_ip" "k8s_load_balancer_ip" {
name = reverse(split("/", tolist(azurerm_kubernetes_cluster.k8s.network_profile.0.load_balancer_profile.0.effective_outbound_ips)[0]))[0]
resource_group_name = azurerm_kubernetes_cluster.k8s.node_resource_group
}
output "ingress_public_ip" {
# value = azurerm_public_ip.ingress.ip_address
value = data.azurerm_public_ip.k8s_load_balancer_ip.ip_address
}