When replicating CouchDB from a remote system to a local system through the _changes feed, I get this error:
05-04 19:19:48.089: ERROR/CouchDB(984): [error] [<0.115.0>] Error in replication `a4b53e74a33b773bbca688073b6bbe0d+continuous` (triggered by document `cloud2airline`): {worker_died,<0.532.0>,
    {process_died,<0.547.0>,
        {function_clause,
            [{string,tokens1,[undefined,";",[]]},
             {mochiweb_util,parse_header,1},
             {couch_httpd,get_boundary,1},
             {couch_httpd,parse_multipart_request,3},
             {couch_api_wrap,'-open_doc_revs/6-fun-1-',4},
             {couch_api_wrap_httpc,process_stream_response,5},
             {couch_api_wrap,'-open_doc_revs/6-fun-2-',4}]}}}
Restarting replication in 5 seconds.
Node.js proxy code:
'use strict';

/*!
 * Middleware for forwarding a request to CouchDB.
 */

/**
 * Module dependencies.
 */
var httpProxy = require('http-proxy'),
    util = require('./util');

module.exports = function(couch) {
    var proxy = new httpProxy.HttpProxy(couch),
        couchTarget = couch.target;

    httpProxy.setMaxSockets(200);

    return function(req, res, next) {
        req.headers['host'] = couchTarget.host + ':' + couchTarget.port;
        req.headers['authorization'] = couch.credentials;
        req.headers['x-forwarded-ssl'] = util.isSecureForwardedRequest(req);

        var forwardedFor = req.headers['x-forwarded-for'];
        req.headers['x-real-ip'] = forwardedFor
            ? forwardedFor.split(',', 1)[0]
            : req.connection.remoteAddress;

        req.url = couch.path + req.url;

        console.log(req.headers['x-forwarded-for']);
        console.log(req.headers['x-real-ip']);
        console.log(req.headers['x-forwarded-ssl']);
        console.log(couchTarget.host + ":" + couchTarget.port + req.url);

        return proxy.proxyRequest(req, res);
    };
};
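The function_clause at the top of the trace is string:tokens/2 being called with undefined, which means mochiweb_util:parse_header/1 received a response with no Content-Type header while couch_httpd was looking for the multipart boundary of an attachment fetch. That suggests the Content-Type of multipart/mixed responses is getting lost somewhere between the remote CouchDB and the replicator. A minimal diagnostic sketch, assuming core Node http and placeholder host/database names, that fetches a document the way the replicator does so the header can be compared with and without the proxy in the path:

var http = require('http');

http.get({
    host: 'remote-couch.example.com', // placeholder
    port: 5984,
    path: '/mydb/cloud2airline?open_revs=all',
    headers: { accept: 'multipart/mixed' }
}, function(res) {
    // A healthy reply looks like: multipart/mixed; boundary="..."
    console.log('status:', res.statusCode);
    console.log('content-type:', res.headers['content-type']);
    res.resume(); // drain the body
});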
I am attempting to make an HTTP PUT request from XLRelease to update data in Adobe Workfront. I have been able to log in successfully using the API client and GET data, and I can update data successfully using both Postman and a native Python script. I am using the HttpRequest library within XLR. I receive the same response back in XLR as I do when updating successfully through Postman; however, the data is not updated when going through XLR.
My code is as follows:
import json
import sys

WORKFRONT_API_HOST = releaseVariables['url']
WORKFRONT_API_VERSION = releaseVariables['wfApiVersion']
WORKFRONT_API_KEY = releaseVariables['apiKey']
WORKFRONT_USERNAME = releaseVariables['wfUsername']
FI_ID = releaseVariables['target_customer_id']
newPortfolioId = releaseVariables['target_portfolio_id']
WORKFRONT_API_URL = WORKFRONT_API_HOST + WORKFRONT_API_VERSION

def wfLogin():
    sessionID = ""
    login_endpoint = "/login"
    login_request = HttpRequest({'url': WORKFRONT_API_URL})
    login_response = login_request.get(login_endpoint + "?apiKey=" + str(WORKFRONT_API_KEY).replace("u'", "'") + "&username=" + WORKFRONT_USERNAME, contentType='application/json')
    if login_response.getStatus() != 200:
        print('# Error logging into WF\n')
        print(login_response.getStatus())
        print(login_response.errorDump())
        sys.exit(1)
    else:
        json_response = json.loads(login_response.getResponse())
        print("Logged in to WF")
        sessionID = json_response['data']['sessionID']
        return sessionID

def wfLogout(sessionID):
    logout_endpoint = "/logout"
    logout_request = HttpRequest({'url': WORKFRONT_API_URL})
    logout_response = logout_request.get(logout_endpoint + "?sessionID=" + sessionID, contentType='application/json')
    if logout_response.getStatus() != 200:
        print('# Error logging out of WF\n')
        print(logout_response.getStatus())
        print(logout_response.errorDump())
        sys.exit(1)
    else:
        json_response = json.loads(logout_response.getResponse())
        print("Logged out of WF")

result = []
session_id = wfLogin()
if session_id != "":
    customer_request = HttpRequest({'url': WORKFRONT_API_URL})
    endpoint = '/prgm/%s?sessionID=%s&portfolioID=%s&customerID=%s' % (FI_ID, session_id, newPortfolioId, FI_ID)
    jsonObj = "{}"
    payload = {}
    customer_response = customer_request.put(endpoint, jsonObj, contentType='application/json')
    if customer_response.getStatus() != 200:
        print('# Error connecting to WF\n')
        print(customer_response)
        print(customer_response.getStatus())
        print(customer_response.errorDump())
        sys.exit(1)
    else:
        response_json = json.loads(customer_response.getResponse())
        print("response_json: ", response_json)
    # log out of current session
    wfLogout(session_id)
else:
    print("No sessionID is available")
    sys.exit(1)
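One difference worth ruling out: in the code above the update values travel only in the query string (portfolioID, customerID) while the PUT body is an empty {}. If Postman sends those fields in the JSON body instead, that could explain identical-looking responses with no actual update. A hedged sketch of the body-based variant, reusing the HttpRequest API from the script (whether this Workfront endpoint honours body fields is an assumption; compare against the raw request Postman sends):

endpoint = '/prgm/%s?sessionID=%s' % (FI_ID, session_id)
# Hypothetical body shape, mirroring the former query-string parameters.
payload = json.dumps({'portfolioID': newPortfolioId, 'customerID': FI_ID})
customer_response = customer_request.put(endpoint, payload, contentType='application/json')
print(customer_response.getStatus())
print(customer_response.getResponse())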
I want to execute a query on a Flink SQL table backed by a Kafka topic in a secured Kafka cluster. I'm able to execute the query programmatically but unable to do the same through the Flink SQL client. I'm not sure how to pass the JAAS config (java.security.auth.login.config) and other system properties through the Flink SQL client.
Flink SQL query programmatically
private static void simpleExec_auth() {
    // Create the execution environment.
    final EnvironmentSettings settings = EnvironmentSettings.newInstance()
            .inStreamingMode()
            .withBuiltInCatalogName("default_catalog")
            .withBuiltInDatabaseName("default_database")
            .build();

    System.setProperty("java.security.auth.login.config", "client_jaas.conf");
    System.setProperty("sun.security.jgss.native", "true");
    System.setProperty("sun.security.jgss.lib", "/usr/libexec/libgsswrap.so");
    System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");

    TableEnvironment tableEnvironment = TableEnvironment.create(settings);
    String createQuery = "CREATE TABLE test_flink11 ( "
            + "`keyid` STRING, "
            + "`id` STRING, "
            + "`name` STRING, "
            + "`age` INT, "
            + "`color` STRING, "
            + "`rowtime` TIMESTAMP(3) METADATA FROM 'timestamp', "
            + "`proctime` AS PROCTIME(), "
            + "`address` STRING) "
            + "WITH ( "
            + "'connector' = 'kafka', "
            + "'topic' = 'test_flink10', "
            + "'scan.startup.mode' = 'latest-offset', "
            + "'properties.bootstrap.servers' = 'kafka01.nyc.com:9092', "
            + "'value.format' = 'avro-confluent', "
            + "'key.format' = 'avro-confluent', "
            + "'key.fields' = 'keyid', "
            + "'value.fields-include' = 'EXCEPT_KEY', "
            + "'properties.security.protocol' = 'SASL_PLAINTEXT', "
            + "'properties.sasl.kerberos.service.name' = 'kafka', "
            + "'properties.sasl.kerberos.kinit.cmd' = '/usr/local/bin/skinit --quiet', "
            + "'properties.sasl.mechanism' = 'GSSAPI', "
            + "'key.avro-confluent.schema-registry.url' = 'http://kafka-schema-registry:5037', "
            + "'key.avro-confluent.schema-registry.subject' = 'test_flink6', "
            + "'value.avro-confluent.schema-registry.url' = 'http://kafka-schema-registry:5037', "
            + "'value.avro-confluent.schema-registry.subject' = 'test_flink4')";
    System.out.println(createQuery);
    tableEnvironment.executeSql(createQuery);

    TableResult result = tableEnvironment
            .executeSql("SELECT name, rowtime FROM test_flink11");
    result.print();
}
This is working fine.
Flink SQL query through SQL client
Running this gives the following error.
Flink SQL> CREATE TABLE test_flink11 (`keyid` STRING,`id` STRING,`name` STRING,`address` STRING,`age` INT,`color` STRING) WITH('connector' = 'kafka', 'topic' = 'test_flink10','scan.startup.mode' = 'earliest-offset','properties.bootstrap.servers' = 'kafka01.nyc.com:9092','value.format' = 'avro-confluent','key.format' = 'avro-confluent','key.fields' = 'keyid', 'value.avro-confluent.schema-registry.url' = 'http://kafka-schema-registry:5037', 'value.avro-confluent.schema-registry.subject' = 'test_flink4', 'value.fields-include' = 'EXCEPT_KEY', 'key.avro-confluent.schema-registry.url' = 'http://kafka-schema-registry:5037', 'key.avro-confluent.schema-registry.subject' = 'test_flink6', 'properties.security.protocol' = 'SASL_PLAINTEXT', 'properties.sasl.kerberos.service.name' = 'kafka', 'properties.sasl.kerberos.kinit.cmd' = '/usr/local/bin/skinit --quiet', 'properties.sasl.mechanism' = 'GSSAPI');
Flink SQL> select * from test_flink11;
[ERROR] Could not execute SQL statement. Reason:
java.lang.IllegalArgumentException: Could not find a 'KafkaClient' entry in the JAAS configuration. System property 'java.security.auth.login.config' is /tmp/jaas-6309821891889949793.conf
There is nothing in /tmp/jaas-6309821891889949793.conf except the following comment
# We are using this file as an workaround for the Kafka and ZK SASL implementation
# since they explicitly look for java.security.auth.login.config property
# Please do not edit/delete this file - See FLINK-3929
SQL client run command
bin/sql-client.sh embedded --jar flink-sql-connector-kafka_2.11-1.12.0.jar --jar flink-sql-avro-confluent-registry-1.12.0.jar
Flink cluster command
bin/start-cluster.sh
How do I pass java.security.auth.login.config and the other system properties (that I'm setting in the Java snippet above) to the SQL client?
flink-conf.yaml
# Either authenticate from the Kerberos ticket cache:
security.kerberos.login.use-ticket-cache: true
security.kerberos.login.principal: XXXXX@HADOOP.COM
# ... or from a keytab (use one of the two blocks, not both;
# duplicate keys in flink-conf.yaml override each other):
security.kerberos.login.use-ticket-cache: false
security.kerberos.login.keytab: /path/to/kafka.keytab
security.kerberos.login.principal: XXXX@HADOOP.COM
security.kerberos.login.contexts: Client,KafkaClient
I haven't actually tested whether this solution is feasible; you can try it out. I hope it helps.
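Another avenue, equally hedged (the JAAS file path is a placeholder): JVM system properties can be handed to the Flink processes through env.java.opts in flink-conf.yaml:

env.java.opts: -Djava.security.auth.login.config=/path/to/client_jaas.conf -Djavax.security.auth.useSubjectCredsOnly=false

That covers the JobManager and TaskManager JVMs. The SQL client runs in its own JVM, which the launch scripts configure from the FLINK_ENV_JAVA_OPTS environment variable (whether your sql-client.sh picks it up depends on the Flink version):

export FLINK_ENV_JAVA_OPTS="-Djava.security.auth.login.config=/path/to/client_jaas.conf"
bin/sql-client.sh embedded --jar flink-sql-connector-kafka_2.11-1.12.0.jar --jar flink-sql-avro-confluent-registry-1.12.0.jar

That said, the security.kerberos.login.* route above is usually preferable: Flink generates its JAAS configuration at runtime from those options, which is exactly where the otherwise-empty /tmp/jaas-*.conf file from the error message comes from (see the FLINK-3929 comment it contains), and security.kerberos.login.contexts: Client,KafkaClient is what makes Flink add the missing KafkaClient entry.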
I made a specific port of the db_backup OpenERP 7 module to run on Odoo 8.
It is properly installed and it does take the backup;
the problem is that the resulting SQL file size is zero KB.
This is the code of backup_scheduler.py:
import xmlrpclib
import socket
import os
import time
import base64

from openerp.osv import fields, osv, orm
from openerp import tools, netsvc
from openerp.tools.translate import _

import logging
_logger = logging.getLogger(__name__)

def execute(connector, method, *args):
    res = False
    try:
        res = getattr(connector, method)(*args)
    except socket.error, e:
        raise e
    return res

addons_path = tools.config['addons_path'] + '/auto_backup/DBbackups'

class db_backup(osv.Model):
    _name = 'db.backup'

    def get_db_list(self, cr, user, ids, host='localhost', port='8069', context={}):
        uri = 'http://' + host + ':' + port
        conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
        db_list = execute(conn, 'list')
        return db_list

    _columns = {
        'host': fields.char('Host', size=100, required='True'),
        'port': fields.char('Port', size=10, required='True'),
        'name': fields.char('Database', size=100, required='True', help='Database you want to schedule backups for'),
        'bkp_dir': fields.char('Backup Directory', size=100, help='Absolute path for storing the backups', required='True')
    }
    _defaults = {
        'bkp_dir': lambda *a: addons_path,
        'host': lambda *a: 'localhost',
        'port': lambda *a: '8069'
    }

    def _check_db_exist(self, cr, user, ids):
        for rec in self.browse(cr, user, ids):
            db_list = self.get_db_list(cr, user, ids, rec.host, rec.port)
            if rec.name in db_list:
                return True
        return False

    _constraints = [
        (_check_db_exist, _('Error ! No such database exists!'), [])
    ]

    def schedule_backup(self, cr, user, context={}):
        conf_ids = self.search(cr, user, [])
        confs = self.browse(cr, user, conf_ids)
        for rec in confs:
            db_list = self.get_db_list(cr, user, [], rec.host, rec.port)
            if rec.name in db_list:
                try:
                    if not os.path.isdir(rec.bkp_dir):
                        os.makedirs(rec.bkp_dir)
                except:
                    raise
                bkp_file = '%s_%s.sql' % (rec.name, time.strftime('%Y%m%d_%H_%M_%S'))
                file_path = os.path.join(rec.bkp_dir, bkp_file)
                fp = open(file_path, 'wb')
                uri = 'http://' + rec.host + ':' + rec.port
                conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
                bkp = ''
                try:
                    bkp = execute(conn, 'dump', tools.config['admin_passwd'], rec.name)
                except:
                    _logger.info("Couldn't back up database %s. Bad database administrator password for server running at http://%s:%s" % (rec.name, rec.host, rec.port))
                    continue
                bkp = base64.decodestring(bkp)
                fp.write(bkp)
                fp.close()
            else:
                _logger.info("database %s doesn't exist on http://%s:%s" % (rec.name, rec.host, rec.port))

db_backup()
This is probably because an error occurs on the backend that does not get propagated back to the client. If you check the server logs at the time you take the backup, you will probably see the issue.
Note: if you need a script to get a backup from, or restore a database to, a v7, v8 or v9 server, take a look at https://github.com/daramousk/odoo_remote_backup
I have developed a script for this specific reason, which you can use or change to resolve your issue.
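To make the backend error visible from the module itself, a hedged tweak to the dump call in schedule_backup above: log the real exception rather than assuming a bad admin password, and only open the output file once there is data, so a failed dump no longer leaves a zero-byte .sql behind:

bkp = ''
try:
    bkp = execute(conn, 'dump', tools.config['admin_passwd'], rec.name)
except Exception:
    # Surface the actual server-side failure instead of guessing at it.
    _logger.exception("Dump of %s at http://%s:%s failed", rec.name, rec.host, rec.port)
    continue
if not bkp:
    _logger.info("Empty dump returned for %s, nothing written", rec.name)
    continue
fp = open(file_path, 'wb')
fp.write(base64.decodestring(bkp))
fp.close()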
SoftLayer Object Storage is based on the OpenStack Swift object store.
SoftLayer provide SDKs for their object storage in Python, Ruby, Java and PHP, but not in .NET. Searching for .NET SDKs for OpenStack, I came across OpenStack.NET.
Based on this question, OpenStack.NET is designed for use with Rackspace by default but can be made to work with other OpenStack providers using CloudIdentityWithProject and OpenStackIdentityProvider.
SoftLayer provide the following information for connecting to their Object Storage:
Authentication Endpoint
Public: https://mel01.objectstorage.softlayer.net/auth/v1.0/
Private: https://mel01.objectstorage.service.networklayer.com/auth/v1.0/
Username:
SLOS123456-1:email@example.com
API Key (Password):
1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef
It's not obvious how this would map to the fields of CloudIdentityWithProject and OpenStackIdentityProvider, but I tried the following and a few other combinations of project name / username / URI:
var cloudIdentity = new CloudIdentityWithProject()
{
    ProjectName = "SLOS123456-1",
    Username = "email@example.com",
    Password = "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
};

var identityProvider = new OpenStackIdentityProvider(
    new Uri("https://mel01.objectstorage.softlayer.net/auth/v1.0/"),
    cloudIdentity);

var token = identityProvider.GetToken(null);
However, in all cases I received the following error:
Unable to authenticate user and retrieve authorized service endpoints
Based on reviewing the source code for SoftLayer's other language libraries and for OpenStack.NET, it looks like SoftLayer's object storage uses V1 auth, while OpenStack.NET is using V2 auth.
Based on this article from SoftLayer and this article from SwiftStack, V1 auth uses a /auth/v1.0/ path (like the one provided by SoftLayer), with X-Auth-User and X-Auth-Key headers as arguments, and with the response contained in headers like the following:
X-Auth-Token-Expires = 83436
X-Auth-Token = AUTH_tk1234567890abcdef1234567890abcdef
X-Storage-Token = AUTH_tk1234567890abcdef1234567890abcdef
X-Storage-Url = https://mel01.objectstorage.softlayer.net/v1/AUTH_12345678-1234-1234-1234-1234567890ab
X-Trans-Id = txbc1234567890abcdef123-1234567890
Connection = keep-alive
Content-Length = 1300
Content-Type = text/html; charset=UTF-8
Date = Wed, 14 Oct 2015 01:19:45 GMT
Whereas V2 auth (identity API V2.0) uses a /v2.0/tokens path, with the request and response in JSON objects in the message body.
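For reference, the whole V1 exchange can be reproduced outside any SDK. A minimal sketch using plain HttpClient (the endpoint and credentials are the placeholders from above), handy for confirming the credentials before involving OpenStack.NET at all:

using System;
using System.Linq;
using System.Net.Http;

class SwiftV1AuthTest
{
    static void Main()
    {
        using (var client = new HttpClient())
        {
            var request = new HttpRequestMessage(HttpMethod.Get,
                "https://mel01.objectstorage.softlayer.net/auth/v1.0/");
            // V1 auth: credentials go in headers, not in a JSON body.
            request.Headers.Add("X-Auth-User", "SLOS123456-1:email@example.com");
            request.Headers.Add("X-Auth-Key", "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef");
            var response = client.SendAsync(request).Result;

            // On success, the token and storage URL come back as headers.
            Console.WriteLine(response.StatusCode);
            Console.WriteLine(response.Headers.GetValues("X-Auth-Token").First());
            Console.WriteLine(response.Headers.GetValues("X-Storage-Url").First());
        }
    }
}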
Based on the OpenStackIdentityProvider class in OpenStack.NET I hacked together my own SoftLayerOpenStackIdentityProvider like this:
using JSIStudios.SimpleRESTServices.Client;
using net.openstack.Core.Domain;
using net.openstack.Providers.Rackspace;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using OpenStack.Authentication;
using System;
using System.Linq;
using System.Collections.Generic;

namespace OpenStackTest1
{
    public class SoftLayerOpenStackIdentityProvider : CloudIdentityProvider
    {
        public SoftLayerOpenStackIdentityProvider(
            Uri urlBase, CloudIdentity defaultIdentity)
            : base(defaultIdentity, null, null, urlBase)
        {
            if (urlBase == null)
                throw new ArgumentNullException("urlBase");
        }

        public override UserAccess GetUserAccess(
            CloudIdentity identity, bool forceCacheRefresh = false)
        {
            identity = identity ?? DefaultIdentity;

            Func<UserAccess> refreshCallback =
                () =>
                {
                    // Set up request headers.
                    Dictionary<string, string> headers =
                        new Dictionary<string, string>();
                    headers["X-Auth-User"] = identity.Username;
                    headers["X-Auth-Key"] = identity.APIKey;

                    // Make the request.
                    JObject requestBody = null;
                    var response = ExecuteRESTRequest<JObject>(
                        identity,
                        UrlBase,
                        HttpMethod.GET,
                        requestBody,
                        headers: headers,
                        isTokenRequest: true);

                    if (response == null || response.Data == null)
                        return null;

                    // Get response headers.
                    string authToken = response.Headers.Single(
                        h => h.Key == "X-Auth-Token").Value;
                    string storageUrl = response.Headers.Single(
                        h => h.Key == "X-Storage-Url").Value;
                    string tokenExpires = response.Headers.Single(
                        h => h.Key == "X-Auth-Token-Expires").Value;

                    // Convert expiry from seconds to a date.
                    int tokenExpiresSeconds = Int32.Parse(tokenExpires);
                    DateTimeOffset tokenExpiresDate =
                        DateTimeOffset.UtcNow.AddSeconds(tokenExpiresSeconds);

                    // Create UserAccess via JSON deserialization.
                    UserAccess access = JsonConvert.DeserializeObject<UserAccess>(
                        String.Format(
                            "{{ " +
                            "  token: {{ id: '{0}', expires: '{1}' }}, " +
                            "  serviceCatalog: " +
                            "  [ " +
                            "    {{ " +
                            "      endpoints: [ {{ publicUrl: '{2}' }} ], " +
                            "      type: 'object-store', " +
                            "      name: 'swift' " +
                            "    }} " +
                            "  ], " +
                            "  user: {{ }} " +
                            "}}",
                            authToken,
                            tokenExpiresDate,
                            storageUrl));

                    if (access == null || access.Token == null)
                        return null;

                    return access;
                };

            string key = string.Format("{0}:{1}", UrlBase, identity.Username);
            var userAccess = TokenCache.Get(key, refreshCallback, forceCacheRefresh);
            return userAccess;
        }

        protected override string LookupServiceTypeKey(IServiceType serviceType)
        {
            return serviceType.Type;
        }
    }
}
Because some of the members of UserAccess (like IdentityToken and Endpoint) provide no way to set their fields (the objects have only a default constructor and read-only members), I had to create the UserAccess object by deserializing some temporary JSON in the same shape as the V2 API returns.
This works, i.e. I can now connect like this:
var cloudIdentity = new CloudIdentity()
{
    Username = "SLOS123456-1:email@example.com",
    APIKey = "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
};

var identityProvider = new SoftLayerOpenStackIdentityProvider(
    new Uri("https://mel01.objectstorage.softlayer.net/auth/v1.0/"),
    cloudIdentity);

var token = identityProvider.GetToken(null);
And then get access to files etc. like this:
var cloudFilesProvider = new CloudFilesProvider(identityProvider);
var containers = cloudFilesProvider.ListContainers();
var stream = new MemoryStream();
cloudFilesProvider.GetObject("testcontainer", "testfile.dat", stream);
However, is there a better way than this to use SoftLayer Object Storage from .NET?
I briefly also looked at the OpenStack SDK for .NET (a different library to OpenStack.NET), but it too seems to be based on V2 auth.
We make a callout from one Salesforce org to another Salesforce org using the REST API. That worked until the end of November. We didn't make any changes to the affected classes or configuration.
Now, while sending a request to the REST API, a callout exception is thrown with the message: "Unable to tunnel through proxy. Proxy returns "HTTP/1.0 503 Service Unavailable"".
Authorisation to the REST API is done by session ID.
Does anyone have any idea what the problem is?
Here is the code snippet:
final String WS_ENDPOINT = 'https://login.salesforce.com/services/Soap/c/24.0';
final String REST_ENDPOINT = 'https://eu2.salesforce.com/services/apexrest/UsageReporterService';
final String USERNAME = '*****';
final String PASSWORD = '*****';

HTTP h = new HTTP();
HTTPRequest req = new HTTPRequest();
req.setMethod('POST');
req.setEndpoint(REST_ENDPOINT);
req.setHeader('Content-Type', 'application/json');
req.setTimeout(60000);

HTTP hLogin = new HTTP();
HTTPRequest reqLogin = new HTTPRequest();
reqLogin.setMethod('POST');
reqLogin.setEndpoint(WS_ENDPOINT);
reqLogin.setHeader('Content-Type', 'text/xml');
reqLogin.setHeader('SOAPAction', 'login');
reqLogin.setTimeout(60000);
reqLogin.setCompressed(false);

// get a valid session id
String sessionId;
String loginSoap = '<?xml version="1.0" encoding="UTF-8"?>';
loginSoap += '<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn="urn:enterprise.soap.sforce.com">';
loginSoap += '<soapenv:Body>';
loginSoap += '<urn:login>';
loginSoap += '<urn:username>' + USERNAME + '</urn:username>';
loginSoap += '<urn:password>' + PASSWORD + '</urn:password>';
loginSoap += '</urn:login>';
loginSoap += '</soapenv:Body>';
loginSoap += '</soapenv:Envelope>';
reqLogin.setBody(loginSoap);

HTTPResponse respLogin;
try {
    respLogin = hLogin.send(reqLogin);
} catch (CalloutException c) {
    return null;
}
System.debug('++++++' + respLogin.getStatus() + ': ' + respLogin.getBody());

Dom.Document doc = new Dom.Document();
doc.load(respLogin.getBody());
Dom.XMLNode root = doc.getRootElement();
String ns = root.getNamespace();
Dom.XMLNode bodyEl = root.getChildElements()[0];
if (bodyEl.getChildElements()[0].getName().equals('loginResponse')) {
    sessionId = bodyEl.getChildElements()[0].getChildElement('result', ns).getChildElement('sessionId', ns).getText();
}
// finished getting session Id

if (sessionId != null) { // login was successful
    req.setHeader('Authorization', 'Bearer ' + sessionId);

    // serialize data into json string
    UsageReporterModel usageReporterData = new UsageReporterModel();
    String inputStr = usageReporterData.serialize();
    req.setBody('{ "usageReportData" : ' + inputStr + '}');

    // fire!
    HTTPResponse resp;
    try {
        resp = h.send(req);
    } catch (CalloutException c) {
        return null;
    }
}
I suspect this relates to a change of IP addresses for one of the orgs which hasn't been whitelisted correctly (or added to the "Network Access" object). With it being Salesforce to Salesforce, I would hope that Salesforce.com support can assist.
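While that's being checked, one hedged debugging step: both catch blocks above swallow the CalloutException, so the proxy's response is never recorded anywhere. Logging the message before bailing out (a sketch; the surrounding method is assumed to return null on failure, as in the original) gives you the exact failure to hand to Salesforce support:

try {
    resp = h.send(req);
} catch (CalloutException c) {
    // Capture the proxy error ("Unable to tunnel through proxy ...") before giving up.
    System.debug(LoggingLevel.ERROR, 'Callout failed: ' + c.getMessage());
    return null;
}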