I am following an Azure documentation quickstart tutorial to create a resource group with one SQL server and one database. The code runs just fine and I am able to create all the resources. Now I am curious: how can I extend the same script to create a read-only user inside the database I am creating?
This is the code I have:
import os
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.sql import SqlManagementClient
REGION = 'westus'
GROUP_NAME = 'resource-group-name'
SERVER_NAME = 'server-name'
DATABASE_NAME = 'sample-db'
def run_example():
subscription_id = os.environ.get(
'AZURE_SUBSCRIPTION_ID',
'11111-11-1111-11111-111111') # your Azure Subscription Id
credentials = ServicePrincipalCredentials(
client_id='my-client-id',
secret='my-secret',
tenant='tenant'
)
resource_client = ResourceManagementClient(credentials, subscription_id)
sql_client = SqlManagementClient(credentials, subscription_id)
# You MIGHT need to add SQL as a valid provider for these credentials
# If so, this operation only has to be done once per set of credentials
resource_client.providers.register('Microsoft.Sql')
# Create Resource group
print('Create Resource Group')
resource_group_params = {'location': 'westus'}
print_item(resource_client.resource_groups.create_or_update(
GROUP_NAME, resource_group_params))
# Create a SQL server
print('Create a SQL server')
server = sql_client.servers.create_or_update(
GROUP_NAME,
SERVER_NAME,
{
'location': REGION,
'version': '12.0', # Required for create
'administrator_login': 'server-login', # Required for create
'administrator_login_password': 'pass-word' # Required for create
}
)
print_item(server)
print('\n\n')
# Get SQL server
print('Get SQL server')
server = sql_client.servers.get_by_resource_group(
GROUP_NAME,
SERVER_NAME,
)
print_item(server)
print("\n\n")
# List SQL servers by resource group
print('List SQL servers in a resource group')
for item in sql_client.servers.list_by_resource_group(GROUP_NAME):
print_item(item)
print("\n\n")
# List SQL servers by subscription
print('List SQL servers in a subscription')
for item in sql_client.servers.list():
print_item(item)
print("\n\n")
# List SQL servers usage
print('List SQL servers usage')
for item in sql_client.servers.list_usages(GROUP_NAME, SERVER_NAME):
print_metric(item)
print("\n\n")
# Create a database
print('Create SQL database')
async_db_create = sql_client.databases.create_or_update(
GROUP_NAME,
SERVER_NAME,
DATABASE_NAME,
{
'location': REGION
}
)
# Wait for completion and return created object
database = async_db_create.result()
print_item(database)
print("\n\n")
# Get SQL database
print('Get SQL database')
database = sql_client.databases.get(
GROUP_NAME,
SERVER_NAME,
DATABASE_NAME
)
print_item(database)
print("\n\n")
# List SQL databases by server
print('List SQL databases in a server')
for item in sql_client.databases.list_by_server(GROUP_NAME, SERVER_NAME):
print_item(item)
print("\n\n")
# List SQL database usage
print('List SQL database usage')
for item in sql_client.databases.list_usages(GROUP_NAME, SERVER_NAME, DATABASE_NAME):
print_metric(item)
print("\n\n")
def print_item(group):
"""Print an Azure object instance."""
print("\tName: {}".format(group.name))
print("\tId: {}".format(group.id))
print("\tLocation: {}".format(group.location))
if hasattr(group, 'tags'):
print("\tTags: {}".format(group.tags))
if hasattr(group, 'properties'):
print_properties(group.properties)
def print_metric(group):
"""Print an SQL metric."""
print("\tResource Name: {}".format(group.resource_name))
print("\tName: {}".format(group.display_name))
print("\tValue: {}".format(group.current_value))
print("\tUnit: {}".format(group.unit))
def print_properties(props):
"""Print a ResourceGroup properties instance."""
if props and props.provisioning_state:
print("\tProperties:")
print("\t\tProvisioning State: {}".format(props.provisioning_state))
print("\n\n")
if __name__ == "__main__":
run_example()
I am missing this last bit where I create the read-only user inside the database. Thank you very much for your time and help, guys!
Creating a user in an Azure SQL database is very different from creating the database instance. It requires the admin account (or one with sufficient permissions), and the user is bound to a login: the login must be created in the master database, the user must be created in the target user database, and the database role must then be granted to the user. The management-SDK code you are using is not suitable for creating the user.
Even with a pyodbc script you still need a connection string that specifies the database, user, and password. The limitation is that you cannot access the master database and a user database with one connection string or SQL database connection.
I'm afraid we can't do that with the management code alone.
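That said, once the resources exist you can finish the job over T-SQL with two separate pyodbc connections: one to master for the login, and one to the new database for the user and its role membership. Below is a minimal sketch, not a tested implementation: it assumes the ODBC Driver 17 for SQL Server is installed and that the server firewall already allows your client IP (the SDK also exposes sql_client.firewall_rules for that); readonly_login, readonly_user, and the passwords are made-up placeholders.
import pyodbc

SERVER = 'server-name.database.windows.net'
ADMIN_USER = 'server-login'
ADMIN_PASSWORD = 'pass-word'

def connect(database):
    # autocommit=True because CREATE LOGIN / CREATE USER cannot run
    # inside an explicit transaction on Azure SQL
    return pyodbc.connect(
        'DRIVER={ODBC Driver 17 for SQL Server};'
        'SERVER=' + SERVER + ';DATABASE=' + database + ';'
        'UID=' + ADMIN_USER + ';PWD=' + ADMIN_PASSWORD,
        autocommit=True)

# The login has to be created in the master database...
master = connect('master')
master.execute("CREATE LOGIN readonly_login WITH PASSWORD = 'Str0ng!Passw0rd'")
master.close()

# ...while the user and its role membership live in the user database
db = connect('sample-db')
db.execute("CREATE USER readonly_user FOR LOGIN readonly_login")
db.execute("ALTER ROLE db_datareader ADD MEMBER readonly_user")
db.close()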
I have a connection problem with Cloud SQL Postgres from my Flask REST API app.
I have a db.py file:
import os
from flask_sqlalchemy import SQLAlchemy
import sqlalchemy
db = SQLAlchemy()
def connect_unix_socket() -> sqlalchemy.engine.base.Engine:
""" Initializes a Unix socket connection pool for a Cloud SQL instance of Postgres. """
# Note: Saving credentials in environment variables is convenient, but not
# secure - consider a more secure solution such as
# Cloud Secret Manager (https://cloud.google.com/secret-manager) to help
# keep secrets safe.
db_user = os.environ["DB_USER"] # e.g. 'my-database-user'
db_pass = os.environ["DB_PASS"] # e.g. 'my-database-password'
db_name = os.environ["DB_NAME"] # e.g. 'my-database'
unix_socket_path = os.environ["INSTANCE_UNIX_SOCKET"] # e.g. '/cloudsql/project:region:instance'
pool = sqlalchemy.create_engine(
# Equivalent URL:
# postgresql+pg8000://<db_user>:<db_pass>@/<db_name>
# ?unix_sock=<INSTANCE_UNIX_SOCKET>/.s.PGSQL.5432
# Note: Some drivers require the `unix_sock` query parameter to use a different key.
# For example, 'psycopg2' uses the path set to `host` in order to connect successfully.
sqlalchemy.engine.url.URL.create(
drivername="postgresql+pg8000",
username=db_user,
password=db_pass,
database=db_name,
query={"unix_sock": "{}/.s.PGSQL.5432".format(unix_socket_path)},
),
# Pool size is the maximum number of permanent connections to keep.
pool_size=5,
# Temporarily exceeds the set pool_size if no connections are available.
max_overflow=2,
# The total number of concurrent connections for your application will be
# a total of pool_size and max_overflow.
# 'pool_timeout' is the maximum number of seconds to wait when retrieving a
# new connection from the pool. After the specified amount of time, an
# exception will be thrown.
pool_timeout=30, # 30 seconds
# 'pool_recycle' is the maximum number of seconds a connection can persist.
# Connections that live longer than the specified amount of time will be
# re-established
pool_recycle=1800, # 30 minutes
)
return pool
I import the db.py file in my app.py file:
import os
import sqlalchemy
from flask import Flask
from flask_smorest import Api
from flask_sqlalchemy import SQLAlchemy
from db import db, connect_unix_socket
import models
from resources.user import blp as UserBlueprint
# pylint: disable=C0103
app = Flask(__name__)
def init_connection_pool() -> sqlalchemy.engine.base.Engine:
    # use a Unix socket when INSTANCE_UNIX_SOCKET (e.g. /cloudsql/project:region:instance) is defined
    unix_socket_path = os.environ.get("INSTANCE_UNIX_SOCKET")
    if unix_socket_path:
        return connect_unix_socket()
    raise ValueError(
        "Missing database connection type. Please define one of INSTANCE_HOST, INSTANCE_UNIX_SOCKET, or INSTANCE_CONNECTION_NAME"
    )
db = None
@app.before_first_request
def init_db() -> sqlalchemy.engine.base.Engine:
global db
db = init_connection_pool()
api = Api(app)
@app.route("/api")
def user_route():
return "Welcome user API!"
api.register_blueprint(UserBlueprint)
if __name__ == '__main__':
server_port = os.environ.get('PORT', '8080')
app.run(debug=True, port=server_port, host='0.0.0.0')
The app runs correctly, but when I call the endpoint to GET or POST users, the app crashes and gives me this error:
RuntimeError: The current Flask app is not registered with this 'SQLAlchemy' instance. Did you forget to call 'init_app', or did you create multiple 'SQLAlchemy' instances?
This is my User.py class:
from flask.views import MethodView
from flask_smorest import Blueprint
from sqlalchemy.exc import SQLAlchemyError, IntegrityError
from db import db
from models import UserModel
from schemas import UserSchema
blp = Blueprint("Users", "users", description="Operations on users")
@blp.route("/user/<string:user_id>")
class User(MethodView):
@blp.response(200, UserSchema)
def get(self, user_id):
user = UserModel.query.get_or_404(user_id)
return user
def delete(self, user_id):
user = UserModel.query.get_or_404(user_id)
db.session.delete(user)
db.session.commit()
return {"message": "User deleted"}, 200
@blp.route("/user")
class UserList(MethodView):
@blp.response(200, UserSchema(many=True))
def get(self):
return UserModel.query.all()
How can I fix this issue?
@dev_ Your issue is that you are trying to intermingle the use of SQLAlchemy Core with the SQLAlchemy ORM as if they were the same thing, which leads to your errors. Connection pools created with sqlalchemy.create_engine use the Core API, while Flask-SQLAlchemy uses the SQLAlchemy ORM model. This is the root cause of your issue; it is easier to use one or the other.
I would recommend using Flask-SQLAlchemy together with the cloud-sql-python-connector library for your use case. It will make your life much easier.
For simplicity, I am getting rid of your db.py leading to your app.py file being as follows:
import os
from flask import Flask
from flask_smorest import Api
from flask_sqlalchemy import SQLAlchemy
from google.cloud.sql.connector import Connector, IPTypes
from resources.user import blp as UserBlueprint
# load env vars
db_user = os.environ["DB_USER"] # e.g. 'my-database-user'
db_pass = os.environ["DB_PASS"] # e.g. 'my-database-password'
db_name = os.environ["DB_NAME"] # e.g. 'my-database'
instance_connection_name = os.environ["INSTANCE_CONNECTION_NAME"] # e.g. 'project:region:instance'
# Initialize the Cloud SQL Python Connector once, so it stays alive
# (refreshing certificates in the background) for all pooled connections
connector = Connector()

# Python Connector database connection function
def getconn():
    conn = connector.connect(
        instance_connection_name,  # Cloud SQL Instance Connection Name
        "pg8000",
        user=db_user,
        password=db_pass,
        db=db_name,
        ip_type=IPTypes.PUBLIC  # IPTypes.PRIVATE for private IP
    )
    return conn
app = Flask(__name__)
# configure Flask-SQLAlchemy to use Python Connector
app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql+pg8000://"
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {
"creator": getconn
}
# initialize db (using app!)
db = SQLAlchemy(app)
# rest of your code
api = Api(app)
# ...
Hope this helps resolve your issue!
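Alternatively, if you would rather keep the shared db = SQLAlchemy() instance from your db.py, the error message itself points at the fix: that instance has to be registered with the app via init_app. A minimal sketch of that variant, reusing the getconn helper above:
from flask import Flask
from db import db  # the shared SQLAlchemy() instance from db.py

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql+pg8000://"
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {"creator": getconn}

# Register the existing SQLAlchemy instance with this app so that
# UserModel.query and db.session know which engine to use
db.init_app(app)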
I'm new to Terraform, and I want to create a schema for the postgres database created on a PostgreSQL 9.6 instance on Google Cloud SQL.
To create the PostgreSQL instance I have this in main.tf:
resource "google_sql_database_instance" "my-database" {
name = "my-${var.deployment_name}"
database_version = "POSTGRES_9_6"
region = "${var.deployment_region}"
settings {
tier = "db-f1-micro"
ip_configuration {
ipv4_enabled = true
}
}
}
Then I was trying to create a PostgreSQL object like this:
provider "postgresql" {
host = "${google_sql_database_instance.my-database.ip_address}"
username = "postgres"
}
Finally creating the schema:
resource "postgresql_schema" "my_schema" {
name = "my_schema"
owner = "postgres"
}
However, this configuration does not work. When I run terraform plan I get:
Inappropriate value for attribute "host": string required.
If I remove the Postgres object:
Error: Error initializing PostgreSQL client: error detecting capabilities: error PostgreSQL version: dial tcp :5432: connect: connection refused
Additionally, I would like to set a password for the postgres user, which is created by default when the PostgreSQL instance is created.
EDITED:
versions used
Terraform v0.12.10
+ provider.google v2.17.0
+ provider.postgresql v1.2.0
Any suggestions?
There are a few issues with the Terraform setup that you have above.
Your instance does not have any authorized networks defined. You should change your instance resource to look like this (note: I used 0.0.0.0/0 just for testing purposes):
resource "google_sql_database_instance" "my-database" {
name = "my-${var.deployment_name}"
database_version = "POSTGRES_9_6"
region = "${var.deployment_region}"
settings {
tier = "db-f1-micro"
ip_configuration {
ipv4_enabled = true
authorized_networks {
name = "all"
value = "0.0.0.0/0"
}
}
}
depends_on = [
"google_project_services.vpc"
]
}
As mentioned here, you need to create a user with a strong password:
resource "google_sql_user" "user" {
name = "test_user"
instance = "${google_sql_database_instance.my-database.name}"
password = "VeryStrongPassword"
depends_on = [
"google_sql_database_instance.my-database"
]
}
You should use the "public_ip_address" or "ip_address.0.ip_address" attribute of your instance to access the IP address. Also, you should update your provider and schema resources to reflect the user created above:
provider "postgresql" {
host = "${google_sql_database_instance.my-database.public_ip_address}"
username = "${google_sql_user.user.name}"
password = "${google_sql_user.user.password}"
}
resource "postgresql_schema" "my_schema" {
name = "my_schema"
owner = "test_user"
}
Your postgres provider depends on the google_sql_database_instance resource being created before the provider can be configured. All providers are initialized at the beginning of plan/apply, so if one has an invalid config (in this case an empty host) then Terraform will fail, and there is no way to define a dependency between a provider and a resource within another provider.
There is, however, a workaround using the -target parameter:
terraform apply -target=google_sql_user.user
This will create the database user (as well as all its dependencies, in this case the database instance). Once that completes, follow it with:
terraform apply
This should then succeed, as the instance has already been created and its ip_address is available to the postgres provider.
Final note: using public IP addresses without SSL to connect to Cloud SQL instances is not recommended for production instances.
This was my solution; this way I just need to run terraform apply:
// POSTGRESQL INSTANCE
resource "google_sql_database_instance" "my-database" {
database_version = "POSTGRES_9_6"
region = var.deployment_region
settings {
tier = var.db_machine_type
ip_configuration {
ipv4_enabled = true
authorized_networks {
name = "my_ip"
value = var.db_allowed_networks.my_network_ip
}
}
}
}
// DATABASE USER
resource "google_sql_user" "user" {
name = var.db_credentials.db_user
instance = google_sql_database_instance.my-database.name
password = var.db_credentials.db_password
depends_on = [
"google_sql_database_instance.my-database"
]
provisioner "local-exec" {
command = "psql postgresql://${google_sql_user.user.name}:${google_sql_user.user.password}#${google_sql_database_instance.my-database.public_ip_address}/postgres -c \"CREATE SCHEMA myschema;\""
}
}
I have a module which contains resources for:
azure postgres server
azure postgres database
postgres role (user)
postgres provider (for the server and used to create the role)
In one of my env directories I can have 0-N .tf files, each of which is an instance of that module and specifies the database name etc. So if I add another .tf file with a new name, a new database server with a database is provisioned. All this works fine.
However, if I now delete an existing database module (one of the .tf files in my env directory) I run into issues. Terraform will try to refresh the state of all the previously existing resources, and since that specific provider (for that postgres server) is now gone, Terraform cannot get the state of the created postgres role and fails with "a provider configuration block is required for all operations".
I understand why this happens, but I cannot figure out how to solve it. I want to "dynamically" create (and remove) postgres servers with a database on them, but this requires "dynamic" providers, which is where I get stuck.
Example of how it looks
resource "azurerm_postgresql_server" "postgresserver" {
name = "${var.db_name}-server"
location = "${var.location}"
resource_group_name = "${var.resource_group}"
sku = ["${var.vmSize}"]
storage_profile = ["${var.storage}"]
administrator_login = "psqladminun"
administrator_login_password = "${random_string.db-password.result}"
version = "${var.postgres_version}"
ssl_enforcement = "Disabled"
}
provider "postgresql" {
version = "0.1.0"
host = "${azurerm_postgresql_server.postgresserver.fqdn}"
port = 5432
database = "postgres"
username = "${azurerm_postgresql_server.postgresserver.administrator_login}#${azurerm_postgresql_server.postgresserver.name}".
password = "${azurerm_postgresql_server.postgresserver.administrator_login_password}"
}
resource "azurerm_postgresql_database" "db" {
name = "${var.db_name}"
resource_group_name = "${var.resource_group}"
server_name = "${azurerm_postgresql_server.postgresserver.name}"
charset = "UTF8"
collation = "English_United States.1252"
}
resource "postgresql_role" "role" {
name = "${random_string.user.result}"
login = true
connection_limit = 100
password = "${random_string.pass.result}"
create_role = true
create_database = true
depends_on = ["azurerm_postgresql_database.db"]
}
Above you can see how, in the module, we create a postgres server, a postgres db, and a postgres role (where only the role uses the postgres provider). So if I now define an instance datadb.tf such as:
module "datadb" {
source = "../../modules/postgres"
db_name = "datadb"
resource_group = "${azurerm_resource_group.resource-group.name}"
location = "${azurerm_resource_group.resource-group.location}"
}
then it will be provisioned successfully. The issue is that if I later delete that same file (datadb.tf), planning fails, because Terraform will try to read the state of the postgres role without the postgres provider being present.
The postgres provider is only needed for the postgres role, which will effectively disappear as soon as the azure provider destroys the postgres db and postgres server, so the explicit removal of that role is not actually necessary. Is there a way to tell Terraform "if this resource is to be removed, you don't have to do anything, because it will vanish together with its server"? Or does anyone see any other solutions?
I hope my goal and issue is clear, thanks!
I think the only solution is a two-step one, but it's still clean enough.
What I would do is have two files per database (name them how you want).
db-1-infra.tf
db-1-pgsql.tf
Put everything except your postgres resources in db-1-infra.tf
resource "azurerm_postgresql_server" "postgresserver" {
name = "${var.db_name}-server"
location = "${var.location}"
resource_group_name = "${var.resource_group}"
sku = ["${var.vmSize}"]
storage_profile = ["${var.storage}"]
administrator_login = "psqladminun"
administrator_login_password = "${random_string.db-password.result}"
version = "${var.postgres_version}"
ssl_enforcement = "Disabled"
}
provider "postgresql" {
version = "0.1.0"
host = "${azurerm_postgresql_server.postgresserver.fqdn}"
port = 5432
database = "postgres"
username = "${azurerm_postgresql_server.postgresserver.administrator_login}#${azurerm_postgresql_server.postgresserver.name}".
password = "${azurerm_postgresql_server.postgresserver.administrator_login_password}"
}
resource "azurerm_postgresql_database" "db" {
name = "${var.db_name}"
resource_group_name = "${var.resource_group}"
server_name = "${azurerm_postgresql_server.postgresserver.name}"
charset = "UTF8"
collation = "English_United States.1252"
}
Put your PostgreSQL resources in db-1-pgsql.tf
resource "postgresql_role" "role" {
name = "${random_string.user.result}"
login = true
connection_limit = 100
password = "${random_string.pass.result}"
create_role = true
create_database = true
depends_on = ["azurerm_postgresql_database.db"]
}
When you want to get rid of your database, first delete the file db-1-pgsql.tf and apply. Next, delete db-1-infra.tf and apply again.
The first step will destroy all postgres resources and free you up to run the second step, which will remove the postgres provider for that database.
I managed to get Terraform nicely creating databases and roles in an RDS Postgres database, but due to the stripped-down permissions of the rds_superuser role I can't see an easy way to destroy the created databases that are owned by another user.
Using the following config:
resource "postgresql_role" "app" {
name = "app"
login = true
password = "foo"
skip_reassign_owned = true
}
resource "postgresql_database" "database" {
name = "app_database"
owner = "${postgresql_role.app.name}"
}
(For reference, skip_reassign_owned is required because the rds_superuser group doesn't get the permissions necessary to reassign ownership.)
leads to this error:
Error applying plan:
1 error(s) occurred:
* postgresql_database.database (destroy): 1 error(s) occurred:
* postgresql_database.database: Error dropping database: pq: must be owner of database debug_db1
Terraform does not automatically rollback in the face of errors.
Instead, your Terraform state file has been partially updated with
any resources that successfully completed. Please address the error
above and apply again to incrementally change your infrastructure.
Using a local-exec provisioner I was able to grant the role that owns the database to both the admin user and the application user:
resource "aws_db_instance" "database" {
...
}
provider "postgresql" {
host = "${aws_db_instance.database.address}"
port = 5432
username = "myadminuser"
password = "adminpassword"
sslmode = "require"
connect_timeout = 15
}
resource "postgresql_role" "app" {
name = "app"
login = true
password = "apppassword"
skip_reassign_owned = true
}
resource "postgresql_role" "group" {
name = "${postgresql_role.app.name}_group"
skip_reassign_owned = true
provisioner "local-exec" {
command = "PGPASSWORD=adminpassword psql -h ${aws_db_instance.database.address} -U myadminuser postgres -c 'GRANT ${self.name} TO myadminuser, ${postgresql_role.app.name};'"
}
}
resource "postgresql_database" "database" {
name = "mydatabase"
owner = "${postgresql_role.group.name}"
}
which seems to work, compared to setting ownership only for the app user. I do wonder if there's a better way to do this without having to shell out to a local-exec, though?
After raising this question I managed to raise a pull request with the fix, which was released in version 0.1.1 of the PostgreSQL provider, so this now works fine in the latest release of the provider.
My Python application allows users to create schemas with names of their choosing. I need a way to protect the application from SQL injection.
The SQL to be executed reads
CREATE SCHEMA schema_name AUTHORIZATION user_name;
The psycopg documentation (generally) recommends passing parameters to execute like so:
conn = psycopg2.connect("dbname=test user=postgres")
cur = conn.cursor()
query = 'CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s;'
params = ('schema_name', 'user_name')
cur.execute(query, params)
But this results in a query with single quotes, which fails:
CREATE SCHEMA 'schema_name' AUTHORIZATION 'user_name';
> fail
Is there a way to remove the quotes, or should I just settle for stripping non-alphanumeric characters from the schema name and call it a day? The latter seems kind of ugly, but it should still work.
To pass identifiers, use AsIs. But be aware that this exposes you to SQL injection:
import psycopg2
from psycopg2.extensions import AsIs
conn = psycopg2.connect(database='cpn')
cursor = conn.cursor()
query = """CREATE SCHEMA %s AUTHORIZATION %s;"""
param = (AsIs('u1'), AsIs('u1; select * from user_table'))
print(cursor.mogrify(query, param))
Output:
CREATE SCHEMA u1 AUTHORIZATION u1; select * from user_table;
Here's some boilerplate that might help. I've used environment variables, but you can use a .conf file or whatever you like.
Store your connection variables in a .env file:
db_host = "localhost"
db_port = "5432"
db_database = "postgres"
db_user = "postgres"
db_password = "postgres"
db_schema = "schema2"
Load the params in your app.py and assign them to variables, then use those variables where required:
import os
import psycopg2
from dotenv import load_dotenv
# Load your environment variables here:
load_dotenv()
db_host = os.environ["db_host"]
db_port = os.environ["db_port"]
db_database = os.environ["db_database"]
db_user = os.environ["db_user"]
db_password = os.environ["db_password"]
db_schema = os.environ["db_schema"]
# Build Connection:
connection = psycopg2.connect(host=db_host,
port=db_port,
database=db_database,
user=db_user,
password=db_password
)
# Build Query Strings:
CREATE_SCHEMA = f"CREATE SCHEMA IF NOT EXISTS {db_schema};"  # db_schema comes from your .env, not from user input
CREATE_TABLE1 = f"CREATE TABLE IF NOT EXISTS {db_schema}.table1 (...);"
CREATE_TABLE2 = f"CREATE TABLE IF NOT EXISTS {db_schema}.table2 (...);"
# Create Schema and Tables:
with connection:
with connection.cursor() as cursor:
cursor.execute(CREATE_SCHEMA)
cursor.execute(CREATE_TABLE1)
cursor.execute(CREATE_TABLE2)
As of psycopg2 >= 2.7, the psycopg2.sql module can be used to compose dynamic statements, which also guards against SQL injection.
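For example, here is a minimal sketch using the question's placeholder names; sql.Identifier double-quotes the supplied schema and role names, so a malicious value like the one shown above cannot break out of the statement:
import psycopg2
from psycopg2 import sql

conn = psycopg2.connect("dbname=test user=postgres")
with conn.cursor() as cur:
    # Identifiers are composed into the query safely quoted,
    # unlike plain string formatting or AsIs
    cur.execute(
        sql.SQL("CREATE SCHEMA IF NOT EXISTS {} AUTHORIZATION {}").format(
            sql.Identifier("schema_name"),
            sql.Identifier("user_name"),
        )
    )
conn.commit()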