I'm trying to validate a transaction receipt from an in-app purchase against the Apple App Store server from my Twisted server. I have sent the (SKPaymentTransaction *)transaction.transactionReceipt from my app to my server.
But now, when sending the JSON object to the Apple server, I keep getting an unhandled error in a Deferred from my Agent.request(). I suspect this is because I'm not listening on port 443 for the response from the Apple store, but I don't want my app to communicate with my Twisted server on port 443 as well. Here is my code:
from twisted.application import internet, service
from twisted.internet import protocol, reactor
from zope.interface import implements
from twisted.web.iweb import IBodyProducer
from twisted.internet import defer
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
import json
import base64
class StringProducer(object):
    implements(IBodyProducer)

    def __init__(self, body):
        self.body = body
        self.length = len(body)

    def startProducing(self, consumer):
        consumer.write(self.body)
        return succeed(None)

    def pauseProducing(self):
        pass

    def stopProducing(self):
        pass

def printResponse(response):
    print response # just testing to see what I have

def httpRequest(url, values, headers={}, method='POST'):
    agent = Agent(reactor)
    d = agent.request(method,
                      url,
                      Headers(headers),
                      StringProducer(values)
                      )
    d.addCallback(printResponse)

class storeServer(protocol.Protocol):
    def dataReceived(self, data):
        receiptBase64 = base64.standard_b64encode(data)
        jsonReceipt = json.dumps({'receipt-data':receiptBase64})
        print jsonReceipt # verified that my data is correct
        d = httpRequest(
            "https://buy.itunes.apple.com/verifyReceipt",
            jsonReceipt,
            {'Content-Type': ['application/x-www-form-urlencoded']}
        )
factory = protocol.Factory()
factory.protocol = storeServer
tcpServer = internet.TCPServer(30000, factory)
tcpServer.setServiceParent(application)
How can I fix this error? Do I have to create another service listening on port 443? If so, how would I have the service that talks to my app communicate with the service that talks to Apple over HTTPS?
The comment style in your code sample is incorrect. Python uses # for comments, not //.
After fixing that and running the snippet through pyflakes, I see these errors:
program.py:1: 'service' imported but unused
program.py:6: 'defer' imported but unused
program.py:21: undefined name 'succeed'
program.py:48: local variable 'd' is assigned to but never used
program.py:57: undefined name 'application'
It seems likely that the undefined name on line 21 is the cause of the error you've encountered; the unhandled error in your Deferred is most likely a NameError, which is how Python signals this sort of bug:
x = y
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'y' is not defined
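For reference, here is how the producer class from your snippet would look with that name imported; this is just your StringProducer with the missing import added, nothing else changed:

from twisted.internet.defer import succeed  # the name pyflakes reported as undefined
from twisted.web.iweb import IBodyProducer
from zope.interface import implements

class StringProducer(object):
    implements(IBodyProducer)

    def __init__(self, body):
        self.body = body
        self.length = len(body)

    def startProducing(self, consumer):
        # Write the whole body at once and signal completion immediately.
        consumer.write(self.body)
        return succeed(None)

    def pauseProducing(self):
        pass

    def stopProducing(self):
        pass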
I am currently trying to deploy an application from a repo (https://github.com/IBM/nlc-icd10-classifier#run-locally), but it gives me this error:
Traceback (most recent call last):
File "app.py", line 34, in <module>
iam_apikey=nlc_iam_apikey
TypeError: __init__() got an unexpected keyword argument 'iam_apikey'
I am on Python 3.6.8
app.py:
load_dotenv(os.path.join(os.path.dirname(__file__), ".env"))
nlc_username = os.environ.get("NATURAL_LANGUAGE_CLASSIFIER_USERNAME")
nlc_password = os.environ.get("NATURAL_LANGUAGE_CLASSIFIER_PASSWORD")
nlc_iam_apikey = os.environ.get("NATURAL_LANGUAGE_CLASSIFIER_IAM_APIKEY")
classifier_id = os.environ.get("CLASSIFIER_ID")

# Use provided credentials from environment or pull from IBM Cloud VCAP
if nlc_iam_apikey != "placeholder":
    NLC_SERVICE = NaturalLanguageClassifierV1(
        iam_apikey=nlc_iam_apikey
    )
elif nlc_username != "placeholder":
    NLC_SERVICE = NaturalLanguageClassifierV1(
        username=nlc_username,
        password=nlc_password
    )
.env:
CLASSIFIER_ID=<add_NLC_classifier_id>
#NATURAL_LANGUAGE_CLASSIFIER_USERNAME=<add_NLC_username>
#NATURAL_LANGUAGE_CLASSIFIER_PASSWORD=<add_NLC_password>
NATURAL_LANGUAGE_CLASSIFIER_IAM_APIKEY="placeholderapikeyforstackoverflolw"
It seems that you ran into an issue with the Watson SDK. With V4, they recently introduced a breaking change, which is documented in their release notes. There is a new, more abstract authentication mechanism that caters to different authentication types. You need to slightly change how the NLC service is initialized.
This is from the migration instructions:
For example, to pass an IAM API key:
Before

from ibm_watson import MyService

service = MyService(
    iam_apikey='{apikey}',
    url='{url}'
)

After (V4.0)

from ibm_watson import MyService
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('{apikey}')
service = MyService(
    authenticator=authenticator
)
service.set_service_url('{url}')
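Applied to the snippet in your question, the initialization would then look roughly like this (a sketch, assuming ibm-watson 4.x; the service URL is a placeholder you would take from your own service credentials):

from ibm_watson import NaturalLanguageClassifierV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Sketch only: authenticate with the IAM API key from the environment,
# then point the client at your own service instance's URL.
authenticator = IAMAuthenticator(nlc_iam_apikey)
NLC_SERVICE = NaturalLanguageClassifierV1(authenticator=authenticator)
NLC_SERVICE.set_service_url('{url}')  # replace with the URL from your credentials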
Good day everybody,
I'm having some issues with Flask and Authlib. Below is a snippet of my Flask code:
from flask import Flask, render_template
from authlib.integrations.flask_client import OAuth
import os
app = Flask(__name__)
app._static_folder = os.path.abspath("static")
app.config.from_object('config')
oauth = OAuth(app)
webex = oauth.register(
    name='WEBEX',
    redirect_uri='http://webapp.dcloud.cisco.com:5000/AuthWebex',
    client_kwargs={
        'scope': 'spark:all'
    }
)
config.py
import os
WEBEX_CLIENT_ID='C3a256be511cdf07e19f272960c44a214aec14b727b108e4f10bd124d31d2112c'
WEBEX_CLIENT_SECRET='secret'
WEBEX_ACCESS_TOKEN_URL='https://api.ciscospark.com/v1/access_token'
WEBEX_REDIRECT_URI='http://localhost:5000/AuthWebex'
WEBEX_SCOPE='spark:all'
When running the above code, I get the following error:
File "/Users/tneumann/PycharmProjects/untitled/venv/lib/python3.7/site-packages/authlib/integrations/flask_client/oauth_registry.py", line 61, in register
self.use_oauth_clients()
File "/Users/tneumann/PycharmProjects/untitled/venv/lib/python3.7/site-packages/authlib/integrations/_client/oauth_registry.py", line 49, in use_oauth_clients
clients = self.AVAILABLE_CLIENTS[name]
KeyError: 'requests'
I've looked at examples and done some research, but no luck; I can't find a solution.
Thanks in advance,
Tobi
UPDATE:
Per the comment below, here is the latest code:
from flask import Flask, render_template, url_for, request
from authlib.integrations.flask_client import OAuth
import os
import requests

app = Flask(__name__)
app._static_folder = os.path.abspath("static")
app.config.from_object('config')
app.secret_key = os.urandom(24)

oauth = OAuth(app)
oauth.register(
    'webex',
    api_base_url='https://api.ciscospark.com/v1',
    authorize_url='https://api.ciscospark.com/v1/authorize',
    access_token_url='https://api.ciscospark.com/v1/access_token',
    redirect_uri='http://webapp.dcloud.cisco.com:5000/AuthWebex',
    scope='spark:all')

@app.route('/')
def main():
    """Entry point; the view for the main page"""
    return render_template('main.html')

@app.route('/authorize')
def authorize():
    return render_template('authorize.html')

@app.route('/login')
def login():
    #redirect_uri = url_for('AuthWebex', _external=True)
    redirect_uri = 'http://webapp.dcloud.cisco.com:5000/AuthWebex'
    print(redirect_uri)
    return oauth.webex.authorize_redirect(redirect_uri)

@app.route('/AuthWebex')
def AuthWebex():
    #print(request.__dict__)
    token = oauth.webex.authorize_access_token(
        authorization_response=request.url,
        redirect_uri='http://webapp.dcloud.cisco.com:5000/AuthWebex',
        client_id='C3a256be511cdf07e19f272960c44a214aec14b727b108e4f10bd124d31d2112c',
        client_secret='secret',
    )
    print("Token: ", token)
    resp = oauth.webex.get('https://api.ciscospark.com/v1/people/me')
    profile = resp.json()
    print(profile)
    # do something with the token and profile
    return '<html><body><h1>Authenticated</h1></body></html>'

if __name__ == '__main__':
    app.run()
The oauth.webex.authorize_access_token function throws an error when called without these parameters, which is strange, as most examples I found do exactly that.
client_id and client_secret are set via the config.py file. This works for the oauth.register function, but not for authorize_access_token.
An additional problem is that even with the parameters it produces a valid token, but when I call the get function I get the following error:
File "/Users/tneumann/PycharmProjects/untitled/venv/lib/python3.7/site-packages/requests/models.py", line 317, in prepare
self.prepare_auth(auth, url)
File "/Users/tneumann/PycharmProjects/untitled/venv/lib/python3.7/site-packages/requests/models.py", line 548, in prepare_auth
r = auth(self)
File "/Users/tneumann/PycharmProjects/untitled/venv/lib/python3.7/site-packages/authlib/integrations/requests_client/oauth2_session.py", line 41, in __call__
raise UnsupportedTokenTypeError(description=description)
authlib.integrations._client.errors.UnsupportedTokenTypeError: unsupported_token_type: Unsupported token_type: 'token_type'
Here is the format of the token returned by the authorize_access_token function:
Token: {'access_token': 'YWIzNGU3<secret>tNDQ5_PF84_7cc07dbd-<secret>-5877334424fd', 'expires_in': 1209599, 'refresh_token': 'MjU2ZDM4N2Et<secret>ZmItMTg5_PF84_7cc07dbd-<secret>877334424fd', 'refresh_token_expires_in': 7722014, 'expires_at': 1574863645}
I've gone through the docs, the code on GitHub, and debugging in PyCharm with no luck; help would be much appreciated!
The problem here is that this Webex OAuth service is not a standard OAuth service: the token response has no token_type. We can fix it with an Authlib compliance fix.
Check the example here:
https://docs.authlib.org/en/latest/client/frameworks.html#compliance-fix-for-oauth-2-0
The Slack example there has the same issue.
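As a rough sketch, modeled on the Slack example from those docs, a compliance fix for Webex could inject the missing token_type into the token response before Authlib parses it (the hook and keyword names below follow the documented compliance-fix API; treat this as a starting point rather than tested code):

import json

def webex_compliance_fix(session):
    def _fix_access_token_response(resp):
        token = resp.json()
        # Webex omits token_type, so add it before Authlib validates the token
        token.setdefault('token_type', 'Bearer')
        resp._content = json.dumps(token).encode('utf-8')
        return resp
    session.register_compliance_hook('access_token_response', _fix_access_token_response)

oauth.register(
    'webex',
    api_base_url='https://api.ciscospark.com/v1',
    authorize_url='https://api.ciscospark.com/v1/authorize',
    access_token_url='https://api.ciscospark.com/v1/access_token',
    redirect_uri='http://webapp.dcloud.cisco.com:5000/AuthWebex',
    compliance_fix=webex_compliance_fix,
    client_kwargs={'scope': 'spark:all'},
)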
I get an error when running the code at the bottom; it's a simple FTP-like program.
I use Python 2.6.6 and CentOS release 6.8.
On most Linux servers it gives the right results, like this (I'm sorry, I've only just signed up, so I can't post images):
Client:
[root@Test ftp]# python client.py
path:put|/home/aaa.txt
Server:
[root@Test ftp]# python server.py
connected...
pre_data:put|aaa.txt|4
cmd: put
file_name: aaa.txt
file_size: 4
upload successed.
But I get errors on some servers (such as my own VM on my PC). I have done lots of tests (Python 2.6/2.7, CentOS 6.5/6.7) and found this error is not caused by them. Here is the error information:
[root@Lewis-VM ftp]# python server.py
connected...
pre_data:put|aaa.txt|7sdfsdf   ### Here is the wrong result: "sdfsdf" is the content of /home/aaa.txt; it shouldn't be appended to 'file_size', which causes the "ValueError" below
cmd: put
file_name: aaa.txt
file_size: 7sdfsdf
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 10699)
Traceback (most recent call last):
File "/usr/lib64/python2.6/SocketServer.py", line 570, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib64/python2.6/SocketServer.py", line 332, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib64/python2.6/SocketServer.py", line 627, in __init__
self.handle()
File "server.py", line 30, in handle
if int(file_size)>recv_size:
ValueError: invalid literal for int() with base 10: '7sdfsdf\n'
What's more, I found that if I insert a time.sleep(1) between sk.send(cmd+"|"+file_name+'|'+str(file_size)) and sk.send(data) in client.py, the error disappears. As I said, I did tests on different systems and Python versions, and the error is not caused by them. So I guess it is caused by some system config? I have checked socket.send() and socket.recv() on python.org but found nothing helpful. Could somebody help explain why this happens?
The code is here:
#!/usr/bin/env python
#coding:utf-8

################
#This is server#
################

import SocketServer
import os

class MyServer(SocketServer.BaseRequestHandler):
    def handle(self):
        base_path = '/home/ftp/file'
        conn = self.request
        print 'connected...'
        while True:
            #####receive pre_data: we should get data like 'put|/home/aaa|7'
            pre_data = conn.recv(1024)
            print 'pre_data:' + pre_data
            cmd,file_name,file_size = pre_data.split('|')
            print 'cmd: ' + cmd
            print 'file_name: '+ file_name
            print 'file_size: '+ file_size
            recv_size = 0
            file_dir = os.path.join(base_path,file_name)
            f = file(file_dir,'wb')
            Flag = True
            ####receive 1024bytes each time
            while Flag:
                if int(file_size)>recv_size:
                    data = conn.recv(1024)
                    recv_size+=len(data)
                else:
                    recv_size = 0
                    Flag = False
                    continue
                f.write(data)
            print 'upload successed.'
            f.close()

instance = SocketServer.ThreadingTCPServer(('127.0.0.1',9999),MyServer)
instance.serve_forever()
#!/usr/bin/env python
#coding:utf-8

################
#This is client#
################

import socket
import sys
import os

ip_port = ('127.0.0.1',9999)
sk = socket.socket()
sk.connect(ip_port)

while True:
    input = raw_input('path:')
    #####we should input like 'put|/home/aaa.txt'
    cmd,path = input.split('|')
    file_name = os.path.basename(path)
    file_size=os.stat(path).st_size
    sk.send(cmd+"|"+file_name+'|'+str(file_size))
    send_size = 0
    f= file(path,'rb')
    Flag = True
    #####read 1024 bytes and send it to server each time
    while Flag:
        if send_size + 1024 >file_size:
            data = f.read(file_size-send_size)
            Flag = False
        else:
            data = f.read(1024)
            send_size+=1024
        sk.send(data)
    f.close()
sk.close()
TCP is a stream of data. That is the problem: TCP does not preserve message boundaries. So when a client calls something like
connection.send("0123456789")
connection.send("ABCDEFGHIJ")
then a naive server like
while True:
    data = conn.recv(1024)
    print data + "_"
may print any of:
0123456789_ABCDEFGHIJ_
0123456789ABCDEFGHIJ_
0_1_2_3_4_5_6_7_8_9_A_B_C_D_E_F_G_H_I_J_
The server has no way to recognize how many sends the client made, because the TCP stack on the client side just inserts the data into a stream, and the server must be able to process the data even when it arrives in a different number of buffers than the client used.
Your server must contain logic to separate the header from the data. All application protocols based on TCP use some mechanism to identify application-level boundaries. For example, HTTP separates headers and body with an empty line and announces the body length in a separate header.
Your program works correctly when the server happens to receive the header with the command, name, and size in a separate buffer; it fails when the client is fast enough to push the data into the stream quickly and the server reads the header and the data in one chunk.
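To make that concrete, here is one illustrative way (not your original code) to frame the messages: terminate the header with a newline, read it separately, and then read exactly file_size bytes of payload:

def recv_header(conn):
    # Read one byte at a time until '\n', so the header can never
    # swallow part of the file data that follows it.
    chunks = []
    while True:
        ch = conn.recv(1)
        if not ch or ch == '\n':
            break
        chunks.append(ch)
    return ''.join(chunks)

# client side: add an explicit terminator after the header
#   sk.send(cmd + '|' + file_name + '|' + str(file_size) + '\n')

# server side: parse the header first, then read exactly file_size bytes
#   header = recv_header(conn)                 # e.g. 'put|aaa.txt|7'
#   cmd, file_name, file_size = header.split('|')
#   remaining = int(file_size)
#   while remaining > 0:
#       data = conn.recv(min(1024, remaining))
#       if not data:
#           break
#       f.write(data)
#       remaining -= len(data)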
I am wondering why this code
test = smtplib.SMTP('smtp.gmail.com', 587)
test.ehlo()
test.starttls()
test.ehlo()
test.login('address','passw')
test.sendmail(sender, recipients, composed)
test.close()
works, but when written like this
with smtplib.SMTP('smtp.gmail.com', 587) as s:
    s.ehlo()
    s.starttls()
    s.ehlo()
    s.login('address','passw')
    s.sendmail(sender, recipients, composed)
    s.close()
it fails with the message
Unable to send the email. Error: <class 'AttributeError'>
Traceback (most recent call last):
File "py_script.py", line 100, in <module>
with smtplib.SMTP('smtp.gmail.com', 587) as s:
AttributeError: __exit__
Why is this happening? (Python 3 on a Raspberry Pi)
Thanks
You are not using Python 3.3 or newer. In your version of Python, smtplib.SMTP() is not a context manager and cannot be used in a with statement.
The traceback is directly caused by the missing __exit__ method, which is a requirement for context managers.
From the smtplib.SMTP() documentation:
Changed in version 3.3: Support for the with statement was added.
You can wrap the object in a context manager with @contextlib.contextmanager:
from contextlib import contextmanager
from smtplib import SMTPResponseException, SMTPServerDisconnected
@contextmanager
def quitting_smtp_cm(smtp):
    try:
        yield smtp
    finally:
        try:
            code, message = smtp.docmd("QUIT")
            if code != 221:
                raise SMTPResponseException(code, message)
        except SMTPServerDisconnected:
            pass
        finally:
            smtp.close()
This uses the same exit behaviour as was added in Python 3.3. Use it like this:
with quitting_smtp_cm(smtplib.SMTP('smtp.gmail.com', 587)) as s:
    s.ehlo()
    s.starttls()
    s.ehlo()
    s.login('address','passw')
    s.sendmail(sender, recipients, composed)
Note that it'll close the connection for you.
I've tried to use tornado.platform.twisted to integrate the txyam memcached client, but when I try to check that it works, the following error is thrown:
Traceback (most recent call last):
File "swcomet/tx_memcache_helper.py", line 32, in <module>
mem_helper = MemcacheHelper()
File "swcomet/tx_memcache_helper.py", line 19, in __init__
self.add(4)
File "/home/rustem/work/sw.services.swcomet.python/venv/local/lib/python2.7/site-packages/tornado/gen.py", line 117, in wrapper
gen = func(*args, **kwargs)
File "swcomet/tx_memcache_helper.py", line 25, in add
self.mem.getPickled(user_id, decompress=True)
File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 133, in getPickled
return self.get(key, **kwargs).addCallback(handleResult, uncompress)
File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 27, in wrapper
func = getattr(self.getClient(key), cmd)
File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 48, in getClient
raise NoServerError, "No connected servers remaining."
txyam.client.NoServerError: No connected servers remaining.
Here is the source code that produces this error:
import tornado.ioloop
import tornado.gen
from txyam.client import YamClient
from swtools.date import _ts
import tornado.platform.twisted

MEMHOSTS = ['127.0.0.1111']
USER_EXPIRATION_TIME = 61

class MemcacheHelper(object):
    def __init__(self, *a, **kw):
        try:
            self.mem = YamClient(["127.0.0.1"])
        except Exception, e:
            print "ERror", e
        self.clients = set()
        self.add(4)

    @tornado.gen.engine
    def add(self, user_id, expire=None):
        self.clients.add(user_id)
        expire = expire or USER_EXPIRATION_TIME
        self.mem.getPickled(user_id, decompress=True)
        print "hmmm"

if __name__ == '__main__':
    print "trying to start on top of IOLOOP"
    ioloop = tornado.ioloop.IOLoop.instance()
    #reactor = TornadoReactor(ioloop)
    mem_helper = MemcacheHelper()
    #mem_helper.add(4)
    ioloop.start()
Please, help me to solve this problem!
txyam appears not to let you perform any memcache operations until after at least one connection has been established:
def getActiveConnections(self):
    return [factory.client for factory in self.factories if not factory.client is None]

def getClient(self, key):
    hosts = self.getActiveConnections()
    log.msg("Using %i active hosts" % len(hosts))
    if len(hosts) == 0:
        raise NoServerError, "No connected servers remaining."
    return hosts[ketama(key) % len(hosts)]
It attempts to set up these connections right away:
def __init__(self, hosts):
    """
    @param hosts: A C{list} of C{tuple}s containing hosts and ports.
    """
    self.connect(hosts)
But connection setup is asynchronous, and it doesn't expose an event to indicate when at least one connection has been established.
So your code fails because you call add right away, before any connections exist. A good long-term fix would be to file a bug report against txyam, because this isn't a very nice interface. YamClient could have a whenReady method that returns a Deferred that fires when you are actually allowed to use the YamClient instance. Or there could be an alternate constructor that returns a Deferred that fires with the YamClient instance, but only after it can be used.
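In the meantime, a rough workaround (a sketch, not part of txyam's API) is to poll getActiveConnections yourself and only start issuing memcache calls once at least one connection exists:

from twisted.internet import defer, reactor

def when_ready(yam_client, poll_interval=0.1):
    # Fires the returned Deferred with the client once it has at least one
    # active connection; it polls because txyam exposes no "ready" event.
    d = defer.Deferred()

    def check():
        if yam_client.getActiveConnections():
            d.callback(yam_client)
        else:
            reactor.callLater(poll_interval, check)

    check()
    return d

# usage: delay the first memcache call until a connection is up
# when_ready(self.mem).addCallback(
#     lambda mem: mem.getPickled(user_id, decompress=True))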