I'm using Pulumi to create an Elastic Beanstalk application. As part of this I need to push a new Docker image to ECR.
I need to push the image after the ECR repository has been created, but before the Beanstalk application version tries to update to the new image.
I have the following code, but I want push_image_to_repository() to somehow be called after the ecr.Repository has been created (ignore the ugly os.system call; that will be removed later).
application = Application(resource_name=ENV_APP_NAME, name=ENV_APP_NAME)
repository = ecr.Repository(resource_name=APP_NAME, name=APP_NAME)

image_tag = artifact_path.name.replace(".zip", "")

def push_image_to_repository(arn):
    upstream = f'{arn}/{image_tag}'
    os.system(f'make -C . push UPSTREAM={upstream}')

app_version = ApplicationVersion(
    resource_name=ENV_APP_NAME,
    application=application,
    bucket=releases_bucket.id,
    key=artifact_path.name,
)

environment = Environment(
    application=application,
    resource_name=ENV_APP_NAME,
    name=ENV_APP_NAME,
    solution_stack_name=STACK,
    settings=BEANSTALK_ENVIRONMENT_SETTINGS,
    wait_for_ready_timeout=BEANSTALK_ENVIRONMENT_TIMEOUT,
    version=app_version,
)
How can I go about doing this?
With TypeScript (Python should be pretty similar):
const repository = new aws.ecr.Repository('my-repo', {...})

const registry = pulumi.output(repository.registryId).apply(async registryId => {
    const credentials = await aws.ecr.getCredentials({ registryId })
    const decodedCredentials = Buffer.from(credentials.authorizationToken, "base64").toString()
    const [username, password] = decodedCredentials.split(":")
    return { server: credentials.proxyEndpoint, username, password }
})

new docker.Image('my-image', {
    build: { context: '...' },
    imageName: pulumi.interpolate`${repository.repositoryUrl}:my-tag`,
    registry: registry,
})
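For reference, here is a rough Python translation of the above (a sketch, not tested end to end; it assumes the pulumi_docker package and reuses the repository and image_tag names from the question):

import base64
import pulumi_aws as aws
import pulumi_docker as docker

def get_registry_info(registry_id):
    # exchange the registry id for temporary docker credentials
    creds = aws.ecr.get_credentials(registry_id=registry_id)
    username, password = base64.b64decode(creds.authorization_token).decode().split(":")
    return docker.ImageRegistry(server=creds.proxy_endpoint, username=username, password=password)

image = docker.Image(
    "my-image",
    build=docker.DockerBuild(context="."),
    image_name=repository.repository_url.apply(lambda url: f"{url}:{image_tag}"),
    registry=repository.registry_id.apply(get_registry_info),
)

Because the image's inputs are derived from the repository's outputs, Pulumi will only build and push after the repository exists. To make the Beanstalk side wait for the push, pass opts=pulumi.ResourceOptions(depends_on=[image]) to ApplicationVersion.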
I have this Jenkinsfile which I want to use to build a pipeline:
pipeline {
agent any
environment {
NEXUS_VERSION = "nexus3"
NEXUS_PROTOCOL = "http"
NEXUS_URL = "you-ip-addr-here:8081"
NEXUS_REPOSITORY = "maven-nexus-repo"
NEXUS_CREDENTIAL_ID = "nexus-user-credentials"
}
stages {
stage('Download Helm Charts') {
steps {
sh "echo 'Downloading Helm Charts from Bitbucket repository...'"
// configure credentials under http://192.168.1.28:8080/user/test/credentials/ and put credentials ID
// not sure if I need to point at the root folder of the Helm repository or only at a single chart
checkout scmGit(
branches: [[name: 'master']],
userRemoteConfigs: [[credentialsId: 'c2672602-dfd5-4158-977c-5009065c867e',
url: 'http://192.168.1.30:7990/scm/jen/helm.git']])
}
}
stage('Test Kubernetes version') {
steps {
sh "echo 'Checking Kubernetes version..'"
// How to do remote test of kubernetes version
}
}
stage('Push Helm Charts to Kubernetes') {
steps {
sh "echo 'building..'"
// push here helm chart from Jenkins server to Kubernetes cluster
}
}
stage('Build Image') {
steps {
sh "echo 'building..'"
// configure credentials under http://192.168.1.28:8080/user/test/credentials/ and put credentials ID
git credentialsId: 'bitbucket-server:50001e738fa6dafbbe7e336853ced1fcbc284fb18ea8cda8b54dbfa3a7bc87b9', url: 'http://192.168.1.30:7990/scm/jen/spring-boot-microservice.git', branch: 'master'
// execute java -jar ... and build the docker image
// (these have to be shell steps; bare commands are not valid pipeline syntax)
sh './gradlew build && java -jar build/libs/gs-spring-boot-docker-0.1.0.jar'
sh 'docker build -t springio/gs-spring-boot-docker .'
}
}
stage('Push Image into Nexus registry') {
steps {
sh "echo 'building..'"
// push compiled docker image into Nexus repository
script {
pom = readMavenPom file: "pom.xml";
filesByGlob = findFiles(glob: "target/*.${pom.packaging}");
echo "${filesByGlob[0].name} ${filesByGlob[0].path} ${filesByGlob[0].directory} ${filesByGlob[0].length} ${filesByGlob[0].lastModified}"
artifactPath = filesByGlob[0].path;
artifactExists = fileExists artifactPath;
if(artifactExists) {
echo "*** File: ${artifactPath}, group: ${pom.groupId}, packaging: ${pom.packaging}, version ${pom.version}";
nexusArtifactUploader(
nexusVersion: NEXUS_VERSION,
protocol: NEXUS_PROTOCOL,
nexusUrl: NEXUS_URL,
groupId: pom.groupId,
version: pom.version,
repository: NEXUS_REPOSITORY,
credentialsId: NEXUS_CREDENTIAL_ID,
artifacts: [
[artifactId: pom.artifactId,
classifier: '',
file: artifactPath,
type: pom.packaging],
[artifactId: pom.artifactId,
classifier: '',
file: "pom.xml",
type: "pom"]
]
);
} else {
error "*** File: ${artifactPath}, could not be found";
}
}
}
}
stage('Deploy Image from Nexus registry into Kubernetes') {
steps {
sh "echo 'building..'"
}
}
stage('Test'){
steps {
sh "echo 'Testing...'"
// implement a check here: was it deployed successfully?
}
}
}
}
How can I deploy the docker image built by the Jenkins server and pushed to the Nexus repository? If possible, I want to use a service account with a token.
Instead of using 'nexusArtifactUploader', why don't you use docker push, like you do to build the image?
I guess nexusArtifactUploader uses the Nexus API and doesn't work with docker images, but you can access the registry using docker and the exposed port (defaults to 5000):
withCredentials([string(credentialsId: NEXUS_CREDENTIAL_ID, variable: 'registryToken')]) {
    // docker push has no --creds flag; log in to the registry first, then push
    sh 'docker login -u default -p "$registryToken" your-registry-url'
    sh 'docker push your-registry-url/image-name:image-tag'
}
You may also change the docker build command to build the image using your registry name (or tag it after building; see How to push a docker image to a private repository).
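For the deploy stage itself (which the answer above does not cover), one option is to let kubectl authenticate with a Kubernetes service-account token, wrapped in withCredentials exactly like the push above. This is only a sketch: the API server URL, deployment name, container name and image reference are placeholders you would replace with your own:

kubectl --server=https://your-k8s-api:6443 --token="$K8S_TOKEN" set image deployment/my-app my-app=your-registry-url/image-name:image-tag

You will also need --certificate-authority=<path-to-ca.crt> (or, for testing only, --insecure-skip-tls-verify) so kubectl trusts the API server, and an RBAC role binding that allows the service account to update deployments.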
This is the file I am trying to deploy for testing, called "00-deploy-mocks":
const { network } = require("hardhat")
const {
    developmentChains,
    DECIMALS,
    initialAnswer
} = require("../helper-hardhat-config")

module.exports = async ({ getNamedAccounts, deployments }) => {
    const { deploy, log } = deployments
    // getNamedAccounts() is async, so the result has to be awaited
    const { deployer } = await getNamedAccounts()
    const chainId = network.config.chainId

    // If we are on a local development network, we need to deploy mocks!
    if (developmentChains.includes(network.name)) {
        log("Local network detected! Deploying mocks...")
        await deploy("MockV3Aggregator", {
            contract: "MockV3Aggregator",
            from: deployer,
            log: true,
            args: [DECIMALS, initialAnswer]
        })
        log("Mocks Deployed!")
        log("------------------------------------------------")
    }
}
module.exports.tags = ["all", "mocks"]
This is the error I'm getting:
yarn run v1.22.15
warning package.json: No license field
$ /home/fbaqueriza/hh-fcc/hardhat-fund-me-fcc/node_modules/.bin/hardhat deploy --tags mocks
Nothing to compile
An unexpected error occurred:
Error: ERROR processing skip func of /home/fbaqueriza/hh-fcc/hardhat-fund-me-fcc/deploy/01-deploy-fund-me.js:
/home/fbaqueriza/hh-fcc/hardhat-fund-me-fcc/deploy/01-deploy-fund-me.js:29
const fundMe = await deploy("FundMe", {
^^^^^
My app is a Python API that I package as a Docker image and use with ECS Fargate (Spot Instances). The code below works.
My issue is that it rebuilds the entire image every time I deploy this, which is very time-consuming (it downloads all dependencies, builds the image, uploads it, etc.). I want it to reuse the exact same image uploaded to ECR by aws-cdk itself.
Is there a way (env variable or else) for me to skip this when I don't touch the app's code and just make changes to the stack?
#!/usr/bin/env node
import * as cdk from "@aws-cdk/core"
import * as ecs from "@aws-cdk/aws-ecs"
import * as ec2 from "@aws-cdk/aws-ec2"
import * as ecrassets from "@aws-cdk/aws-ecr-assets"
// See https://docs.aws.amazon.com/cdk/api/latest/docs/aws-ecs-readme.html
export class Stack extends cdk.Stack {
constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) {
super(scope, id, props)
/**
* Repository & Image
*/
const apiDockerImage = new ecrassets.DockerImageAsset(
this,
`my-api-image`,
{
directory: `.`,
exclude: [`cdk.out`, `cdk`, `.git`]
}
)
/**
* Cluster
*/
const myCluster = new ecs.Cluster(this, "Cluster", {})
// Add Spot Capacity to the Cluster
myCluster.addCapacity(`spot-auto-scaling-group-capacity`, {
maxCapacity: 2,
minCapacity: 1,
instanceType: new ec2.InstanceType(`r5a.large`),
spotPrice: `0.0400`,
spotInstanceDraining: true
})
// A task Definition describes what a single copy of a task should look like
const myApiFargateTaskDefinition = new ecs.FargateTaskDefinition(
this,
`api-fargate-task-definition`,
{
cpu: 2048,
memoryLimitMiB: 8192,
}
)
// Add image to task def
myApiFargateTaskDefinition.addContainer(`api-container`, {
image: ecs.ContainerImage.fromEcrRepository(
apiDockerImage.repository,
`latest`
),
})
// And the service attaching the task def to the cluster
const myApiService = new ecs.FargateService(
this,
`my-api-fargate-service`,
{
cluster: myCluster,
taskDefinition: myApiFargateTaskDefinition,
desiredCount: 1,
assignPublicIp: true,
}
)
}
}
The proper solution is to build your image outside of this deployment process and just get a reference to that image in ECR.
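For example (a sketch: the account id, region, repository name and tag below are placeholders), build and push once from your CI, using the AWS CLI v2 for the registry login:

docker build -t 123456789012.dkr.ecr.us-east-1.amazonaws.com/my-api:v1 .
aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin 123456789012.dkr.ecr.us-east-1.amazonaws.com
docker push 123456789012.dkr.ecr.us-east-1.amazonaws.com/my-api:v1

Then, in the stack, drop the DockerImageAsset and reference the pushed image instead, e.g. ecs.ContainerImage.fromEcrRepository(ecr.Repository.fromRepositoryName(this, "api-repo", "my-api"), "v1") (importing @aws-cdk/aws-ecr), so cdk deploy never triggers a docker build.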
I'm new to Terraform, and I want to create a schema for the postgres database created on a PostgreSQL 9.6 instance on Google Cloud SQL.
To create the PostgreSQL instance I have this in main.tf:
resource "google_sql_database_instance" "my-database" {
name = "my-${var.deployment_name}"
database_version = "POSTGRES_9_6"
region = "${var.deployment_region}"
settings {
tier = "db-f1-micro"
ip_configuration {
ipv4_enabled = true
}
}
}
Then I tried to configure the PostgreSQL provider like this:
provider "postgresql" {
host = "${google_sql_database_instance.my-database.ip_address}"
username = "postgres"
}
Finally creating the schema:
resource "postgresql_schema" "my_schema" {
name = "my_schema"
owner = "postgres"
}
However, this configuration does not work. When I run terraform plan I get:
Inappropriate value for attribute "host": string required.
If I remove the Postgres provider block:
Error: Error initializing PostgreSQL client: error detecting capabilities: error PostgreSQL version: dial tcp :5432: connect: connection refused
Additionally, I would like to add a password for the user postgres which is created by default when the PostgreSQL instance is created.
EDITED:
Versions used:
Terraform v0.12.10
+ provider.google v2.17.0
+ provider.postgresql v1.2.0
Any suggestions?
There are a few issues with the Terraform setup that you have above.
Your instance does not have any authorized networks defined. You should change your instance resource to look like this: (Note: I used 0.0.0.0/0 just for testing purposes)
resource "google_sql_database_instance" "my-database" {
name = "my-${var.deployment_name}"
database_version = "POSTGRES_9_6"
region = "${var.deployment_region}"
settings {
tier = "db-f1-micro"
ip_configuration {
ipv4_enabled = true
authorized_networks {
name = "all"
value = "0.0.0.0/0"
}
}
}
depends_on = [
"google_project_services.vpc"
]
}
As mentioned here, you need to create a user with a strong password
resource "google_sql_user" "user" {
name = "test_user"
instance = "${google_sql_database_instance.my-database.name}"
password = "VeryStrongPassword"
depends_on = [
"google_sql_database_instance.my-database"
]
}
You should use the "public_ip_address" or "ip_address.0.ip_address" attribute of your instance to access the ip address. Also, you should update your provider and schema resource to reflect the user created above.
provider "postgresql" {
host = "${google_sql_database_instance.my-database.public_ip_address}"
username = "${google_sql_user.user.name}"
password = "${google_sql_user.user.password}"
}
resource "postgresql_schema" "my_schema" {
name = "my_schema"
owner = "test_user"
}
Your postgres provider depends on the google_sql_database_instance resource being created before the provider itself can be set up:
All the providers are initialized at the beginning of plan/apply so if one has an invalid config (in this case an empty host) then Terraform will fail.
There is no way to define the dependency between a provider and a
resource within another provider.
There is, however, a workaround using the -target parameter:
terraform apply -target=google_sql_user.user
This will create the database user (as well as all of its dependencies, in this case the database instance). Once that completes, follow it with:
terraform apply
This should then succeed as the instance has already been created and the ip_address is available to be used by the postgres provider.
Final note: using public IP addresses without SSL to connect to Cloud SQL instances is not recommended for production instances.
This was my solution; this way I just need to run terraform apply:
// POSTGRESQL INSTANCE
resource "google_sql_database_instance" "my-database" {
database_version = "POSTGRES_9_6"
region = var.deployment_region
settings {
tier = var.db_machine_type
ip_configuration {
ipv4_enabled = true
authorized_networks {
name = "my_ip"
value = var.db_allowed_networks.my_network_ip
}
}
}
}
// DATABASE USER
resource "google_sql_user" "user" {
name = var.db_credentials.db_user
instance = google_sql_database_instance.my-database.name
password = var.db_credentials.db_password
depends_on = [
"google_sql_database_instance.my-database"
]
provisioner "local-exec" {
command = "psql postgresql://${google_sql_user.user.name}:${google_sql_user.user.password}#${google_sql_database_instance.my-database.public_ip_address}/postgres -c \"CREATE SCHEMA myschema;\""
}
}
I am using Terraform v0.10.6 to spin up a droplet on DigitalOcean. I am referencing a key and SSH fingerprint that has already been added to DigitalOcean in my Terraform config (copied below). I am able to log onto existing droplets using this SSH key, but not onto a newly created droplet (SSH simply fails). Any thoughts on how to troubleshoot this, so that when I launch the droplet via Terraform I can log onto it via the key that has already been added on DigitalOcean (and is visible on the DO console)? Currently the droplet appears on the DigitalOcean admin console, but I am never able to SSH onto the server (the connection gets denied).
test.tf
# add base droplet with name
resource "digitalocean_droplet" "do-mail" {
image = "ubuntu-16-04-x64"
name = "tmp.validdomain.com"
region = "nyc3"
size = "1gb"
private_networking = true
ssh_keys = [
"${var.ssh_fingerprint}",
]
connection {
user = "root"
type = "ssh"
private_key = "${file(var.private_key)}"
timeout = "2m"
}
provisioner "remote-exec" {
inline = [
"export PATH=$PATH:/usr/bin",
"sudo apt-get update",
]
}
}
terraform.tfvars
digitalocean_token = "correcttoken"
public_key = "~/.ssh/id_rsa.pub"
private_key = "~/.ssh/id_rsa"
ssh_fingerprint = "correct:finger:print"
provider.tf
provider "digitalocean" {
token = "${var.digitalocean_token}"
}
variables.tf
##variables used by terraform
# DO token
variable "digitalocean_token" {
type = "string"
}
# DO public key file location on local server
variable "public_key" {
type = "string"
}
# DO private key file location on local server
variable "private_key" {
type = "string"
}
# DO ssh key fingerprint
variable "ssh_fingerprint" {
type = "string"
}
I was able to set up a new droplet with the SSH key at initialization time when I specified the DigitalOcean token as an environment variable (as opposed to relying on the terraform.tfvars file).
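For anyone hitting the same issue, the DigitalOcean provider reads the token from the environment, so something like this (the token value is a placeholder):

export DIGITALOCEAN_TOKEN="correcttoken"
terraform apply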