Gcloud ai-platform, can't create model with own prediction-class - gcloud

I'm trying to follow the AI Platform tutorial to upload a model and a prediction routine, but one part fails and I don't understand why.
My prediction class is the same as in their tutorial:
%%writefile predictor.py
import os
import pickle

import numpy as np
from sklearn.datasets import load_iris
from sklearn.externals import joblib


class MyPredictor(object):
    def __init__(self, model, preprocessor):
        self._model = model
        self._preprocessor = preprocessor
        self._class_names = load_iris().target_names

    def predict(self, instances, **kwargs):
        inputs = np.asarray(instances)
        preprocessed_inputs = self._preprocessor.preprocess(inputs)
        if kwargs.get('probabilities'):
            probabilities = self._model.predict_proba(preprocessed_inputs)
            return probabilities.tolist()
        else:
            outputs = self._model.predict(preprocessed_inputs)
            return [self._class_names[class_num] for class_num in outputs]

    @classmethod
    def from_path(cls, model_dir):
        model_path = os.path.join(model_dir, 'model.joblib')
        model = joblib.load(model_path)

        preprocessor_path = os.path.join(model_dir, 'preprocessor.pkl')
        with open(preprocessor_path, 'rb') as f:
            preprocessor = pickle.load(f)

        return cls(model, preprocessor)
The command I use to create my model version in the cloud is:
! gcloud beta ai-platform versions create {VERSION_NAME} \
--model {MODEL_NAME} \
--runtime-version 1.13 \
--python-version 3.5 \
--origin gs://{BUCKET_NAME}/custom_prediction_routine_tutorial/model/ \
--package-uris gs://{BUCKET_NAME}/custom_prediction_routine_tutorial/my_custom_code-0.1.tar.gz \
--prediction-class predictor.MyPredictor
But I end up with this odd error:
ERROR: (gcloud.beta.ai-platform.versions.create) Bad model detected with error: "Failed to load model: Unexpected error when loading the model: 'ascii' codec can't decode byte 0xf9 in position 2: ordinal not in range(128) (Error code: 0)"
The thing is that when I run the same command without
--prediction-class predictor.MyPredictor
it works fine.
Does anyone know the reason for this? I think model.joblib might have an encoding problem, but when I load it myself there is nothing wrong.

I've found the solution.
In the tutorial they use pickle to save the preprocessor object and joblib to save the model.
You need to use joblib to save both, and then upload them to Google Cloud Storage.
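For reference, here is a minimal sketch of what that looks like (assuming the model and preprocessor objects from the tutorial; the file names are the ones MyPredictor.from_path expects):
from sklearn.externals import joblib  # on newer scikit-learn versions: import joblib

# Save both artifacts with joblib instead of mixing pickle and joblib.
joblib.dump(model, 'model.joblib')
joblib.dump(preprocessor, 'preprocessor.pkl')

# If the preprocessor is saved with joblib, loading it with joblib.load in
# MyPredictor.from_path keeps things consistent.

# Then copy both files into the model directory on Cloud Storage, e.g.:
#   gsutil cp model.joblib preprocessor.pkl gs://$BUCKET_NAME/custom_prediction_routine_tutorial/model/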

Related

Running blenderbot-3B model locally does not provide same result as on Inference API

I tried the facebook/blenderbot-3B model using the Hosted Inference API and it works pretty well (https://huggingface.co/facebook/blenderbot-3B). Now I tried to use it locally with the Python script shown below. The generated responses are much worse than from the Inference API and do not make sense most of the time.
Is different code used for the Inference API, or did I make a mistake?
from transformers import TFAutoModelForCausalLM, AutoTokenizer, TFBlenderbotForConditionalGeneration, TFT5ForConditionalGeneration, BlenderbotTokenizer, BlenderbotForConditionalGeneration
import tensorflow as tf
import torch

device = "cuda:0" if torch.cuda.is_available() else "cpu"

chat_bots = {
    'BlenderBot': [BlenderbotTokenizer.from_pretrained("hyunwoongko/blenderbot-9B"),
                   BlenderbotForConditionalGeneration.from_pretrained("hyunwoongko/blenderbot-9B").to(device)],
}
key = 'BlenderBot'
tokenizer, model = chat_bots[key]

for step in range(100):
    new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt').to(device)
    if step > 0:
        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
    else:
        bot_input_ids = new_user_input_ids
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).to(device)
    print("Bot: ", tokenizer.batch_decode(chat_history_ids, skip_special_tokens=True)[0])

Receiving error when executing code in Databricks / Spark: 'DataFrame' object does not support item assignment

When running the following code I get the error below. I am running the code on the Databricks platform, but the code is written using Pandas.
TypeError: 'DataFrame' object does not support item assignment
Can someone let me know if the error is related to the Spark/Databricks platform not supporting the code?
import numpy as np
import pandas as pd

def matchSchema(df):
    df['active'] = df['active'].astype('boolean')
    df['price'] = df['counts'] / 100
    df.drop('counts', axis=1, inplace=True)
    return df, df.head(3)

(dataset, sample) = matchSchema(df)

print(dataset)
print(sample)
The error is:
TypeError: 'DataFrame' object does not support item assignment
bool should be used instead of boolean as the dtype:
df['active'] = df['active'].astype('bool')

How do I print a dictionary vs a defaultdict based dictionary as a yaml file using ruamel.yaml?

Please refer to the trivial block of code shown below. My goal is to use defaultdict to build a relatively simple dictionary and then print the result out as a YAML file.
When I manually define the dictionary, it works just fine and the YAML is displayed exactly the way I want it, but when I use defaultdict to build the dictionary, I get an error message that I am unable to decipher.
When I print either dictionary as JSON, the output is exactly the same. What am I missing?
import sys, ruamel.yaml
import json
from collections import defaultdict

def dict_maker():
    return defaultdict(dict_maker)

S = ruamel.yaml.scalarstring.DoubleQuotedScalarString

app = "someapp"

d = {'beats': {'name': S(app), 'udp_address': S('239.1.1.1:10101')}}

foo = dict_maker()
foo["beats"]["name"] = S(app)
foo["beats"]["udp_address"] = S("239.1.1.1:10101")

print "Regular dictionary"
print json.dumps(d, indent=4)
print "defaultdict dictionary"
print json.dumps(foo, indent=4)

print "dictionary as a yaml\n"
ruamel.yaml.dump(d, sys.stdout, Dumper=ruamel.yaml.RoundTripDumper)

print "defaultdict dictionary as a yaml\n"
ruamel.yaml.dump(foo, sys.stdout, Dumper=ruamel.yaml.RoundTripDumper)
Error Message
raise RepresenterError("cannot represent an object: %s" % data)
ruamel.yaml.representer.RepresenterError: cannot represent an object: defaultdict(<function dict_maker at 0x7f1253725a28>, {'beats': defaultdict(<function dict_maker at 0x7f1253725a28>, {'name': u'someapp', 'udp_address': u'239.1.1.1:10101'})})
You seem to be using the word "dictionary" when referring to a Python dict. There is, however, no such thing as a "defaultdict based dictionary"; that would imply that foo after
foo = dict_maker()
is a dict, and of course it is not: foo is a defaultdict, which is dict based (i.e. exactly the other way around from what you write).
That JSON dumps this is not surprising, as it cannot do more than naively dump the key-value pairs as if it were a dict. But when you try to load that JSON back, you see how little use this is, as you cannot continue working with it (at least not in the way you would expect):
import sys
import json
from collections import defaultdict
import io

def dict_maker():
    return defaultdict(dict_maker)

app = "someapp"

foo = dict_maker()
foo["beats"]["name"] = app
foo["beats"]["udp_address"] = "239.1.1.1:10101"

io = io.StringIO()
json.dump(foo, io, indent=4)
io.seek(0)
bar = json.load(io)
bar['otherapp']['name'] = 'some_alt_app'
print(bar['beats']['udp_address'])
The above throws: KeyError: 'otherapp'. And that is because JSON doesn't keep all the information needed.
However, if you use the unsafe YAML dumper, then ruamel.yaml can dump and load this fine:
import sys
from ruamel.yaml import YAML
from collections import defaultdict
import io

def dict_maker():
    return defaultdict(dict_maker)

app = "someapp"

yaml = YAML(typ='unsafe')

foo = dict_maker()
foo["beats"]["name"] = app
foo["beats"]["udp_address"] = "239.1.1.1:10101"

io = io.StringIO()
yaml.dump(foo, io)
io.seek(0)
print(io.getvalue())
bar = yaml.load(io)
bar['otherapp']['name'] = 'some_alt_app'
print(bar['beats']['udp_address'])
This doesn't throw an error, as bar is again a defaultdict with dict_maker as the function it defaults to. The above prints
239.1.1.1:10101
as you would expect.
That the RoundTripDumper/Loader doesn't support this out-of-the-box is because it is based on the SafeDumper/Loader, which cannot dump/load arbitrary Python instances like defaultdict and its dict_maker function reference. Enabling that would make loading unsafe.
So if you need to use the RoundTripDumper, you should add a representer for defaultdict or a subclass thereof (and possibly one for dict_maker as well). To be able to load that, you need constructor(s) as well. How to do that is described in the documentation (Dumping Python classes).
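For example, here is a minimal sketch of the representer side (my own addition, not taken from the documentation); note that loading the result back gives plain dicts unless you also register a constructor:
import sys
from collections import defaultdict
from ruamel.yaml import YAML

def dict_maker():
    return defaultdict(dict_maker)

yaml = YAML()  # round-trip dumper/loader by default

def repr_defaultdict(representer, data):
    # dump the defaultdict as if it were a regular dict
    return representer.represent_dict(dict(data))

yaml.representer.add_representer(defaultdict, repr_defaultdict)

foo = dict_maker()
foo["beats"]["name"] = "someapp"
foo["beats"]["udp_address"] = "239.1.1.1:10101"
yaml.dump(foo, sys.stdout)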

Deploying Keras model to Google Cloud ML for serving predictions

I need to understand how to deploy models on Google Cloud ML. My first task is to deploy a very simple text classifier on the service. I do it in the following steps (which could perhaps be shortened to fewer steps; if so, feel free to let me know):
Define the model using Keras and export to YAML
Load up YAML and export as a Tensorflow SavedModel
Upload model to Google Cloud Storage
Deploy model from storage to Google Cloud ML
Set the uploaded model version as default on the models website.
Run model with a sample input
I've finally made steps 1-5 work, but now I get the strange error seen below when running the model. Can anyone help? Details on the steps are below. Hopefully it can also help others who are stuck on one of the previous steps. My model works fine locally.
I've seen Deploying Keras Models via Google Cloud ML and Export a basic Tensorflow model to Google Cloud ML, but they seem to be stuck on other steps of the process.
Error
Prediction failed: Exception during model execution: AbortionError(code=StatusCode.INVALID_ARGUMENT, details="In[0] is not a matrix
[[Node: MatMul = MatMul[T=DT_FLOAT, _output_shapes=[[-1,64]], transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/cpu:0"](Mean, softmax_W/read)]]")
Step 1
# import necessary classes from Keras..
model_input = Input(shape=(maxlen,), dtype='int32')
embed = Embedding(input_dim=nb_tokens,
                  output_dim=256,
                  mask_zero=False,
                  input_length=maxlen,
                  name='embedding')
x = embed(model_input)
x = GlobalAveragePooling1D()(x)
outputs = [Dense(nb_classes, activation='softmax', name='softmax')(x)]
model = Model(input=[model_input], output=outputs, name="fasttext")
# export to YAML..
Step 2
from __future__ import print_function

import sys
import os

import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
import keras
from keras import backend as K
from keras.models import model_from_config, model_from_yaml
from optparse import OptionParser

EXPORT_VERSION = 1  # for us to keep track of different model versions (integer)

def export_model(model_def, model_weights, export_path):
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        K.set_learning_phase(0)  # all new operations will be in test mode from now on

        yaml_file = open(model_def, 'r')
        yaml_string = yaml_file.read()
        yaml_file.close()
        model = model_from_yaml(yaml_string)

        # force initialization
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam')
        Wsave = model.get_weights()
        model.set_weights(Wsave)

        # weights are not loaded as I'm just testing, not really deploying
        # model.load_weights(model_weights)

        print(model.input)
        print(model.output)

        pred_node_names = output_node_names = 'Softmax:0'
        num_output = 1

        export_path_base = export_path
        export_path = os.path.join(
            tf.compat.as_bytes(export_path_base),
            tf.compat.as_bytes('initial'))
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)

        # Build the signature_def_map.
        x = model.input
        y = model.output

        values, indices = tf.nn.top_k(y, 5)
        table = tf.contrib.lookup.index_to_string_table_from_tensor(
            tf.constant([str(i) for i in xrange(5)]))
        prediction_classes = table.lookup(tf.to_int64(indices))

        classification_inputs = tf.saved_model.utils.build_tensor_info(model.input)
        classification_outputs_classes = tf.saved_model.utils.build_tensor_info(prediction_classes)
        classification_outputs_scores = tf.saved_model.utils.build_tensor_info(values)

        classification_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={tf.saved_model.signature_constants.CLASSIFY_INPUTS: classification_inputs},
                outputs={tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES: classification_outputs_classes,
                         tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES: classification_outputs_scores},
                method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))

        tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
        tensor_info_y = tf.saved_model.utils.build_tensor_info(y)

        prediction_signature = (tf.saved_model.signature_def_utils.build_signature_def(
            inputs={'images': tensor_info_x},
            outputs={'scores': tensor_info_y},
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

        legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={'predict_images': prediction_signature,
                               tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: classification_signature,},
            legacy_init_op=legacy_init_op)

        builder.save()
        print('Done exporting!')

        raise SystemExit

if __name__ == '__main__':
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    (options, args) = parser.parse_args()

    if len(args) < 3:
        raise ValueError("Too few arguments!")

    model_def = args[0]
    model_weights = args[1]
    export_path = args[2]
    export_model(model_def, model_weights, export_path)
Step 3
gsutil cp -r fasttext_cloud/ gs://quiet-notch-xyz.appspot.com
Step 4
from __future__ import print_function

from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
from googleapiclient import errors
import time

projectID = 'projects/{}'.format('quiet-notch-xyz')
modelName = 'fasttext'
modelID = '{}/models/{}'.format(projectID, modelName)
versionName = 'Initial'
versionDescription = 'Initial release.'
trainedModelLocation = 'gs://quiet-notch-xyz.appspot.com/fasttext/'

credentials = GoogleCredentials.get_application_default()
ml = discovery.build('ml', 'v1', credentials=credentials)

# Create a dictionary with the fields from the request body.
requestDict = {'name': modelName, 'description': 'Online predictions.'}

# Create a request to call projects.models.create.
request = ml.projects().models().create(parent=projectID, body=requestDict)

# Make the call.
try:
    response = request.execute()
except errors.HttpError as err:
    # Something went wrong, print out some information.
    print('There was an error creating the model.' +
          ' Check the details:')
    print(err._get_reason())

    # Clear the response for next time.
    response = None
    raise

time.sleep(10)

requestDict = {'name': versionName,
               'description': versionDescription,
               'deploymentUri': trainedModelLocation}

# Create a request to call projects.models.versions.create
request = ml.projects().models().versions().create(parent=modelID,
                                                   body=requestDict)

# Make the call.
try:
    print("Creating model setup..", end=' ')
    response = request.execute()

    # Get the operation name.
    operationID = response['name']

    print('Done.')
except errors.HttpError as err:
    # Something went wrong, print out some information.
    print('There was an error creating the version.' +
          ' Check the details:')
    print(err._get_reason())
    raise

done = False
request = ml.projects().operations().get(name=operationID)
print("Adding model from storage..", end=' ')
while not done:
    response = None

    # Wait for 10000 milliseconds.
    time.sleep(10)

    # Make the next call.
    try:
        response = request.execute()

        # Check for finish.
        done = True  # response.get('done', False)
    except errors.HttpError as err:
        # Something went wrong, print out some information.
        print('There was an error getting the operation.' +
              ' Check the details:')
        print(err._get_reason())
        done = True
        raise

print("Done.")
Step 5
Use website.
Step 6
import googleapiclient.discovery

def predict_json(instances, project='quiet-notch-xyz', model='fasttext', version=None):
    """Send json data to a deployed model for prediction.

    Args:
        project (str): project where the Cloud ML Engine Model is deployed.
        model (str): model name.
        instances ([Mapping[str: Any]]): Keys should be the names of Tensors
            your deployed model expects as inputs. Values should be datatypes
            convertible to Tensors, or (potentially nested) lists of datatypes
            convertible to tensors.
        version: str, version of the model to target.
    Returns:
        Mapping[str: any]: dictionary of prediction results defined by the
            model.
    """
    # Create the ML Engine service object.
    # To authenticate set the environment variable
    # GOOGLE_APPLICATION_CREDENTIALS=<path_to_service_account_file>
    service = googleapiclient.discovery.build('ml', 'v1')
    name = 'projects/{}/models/{}'.format(project, model)

    if version is not None:
        name += '/versions/{}'.format(version)

    response = service.projects().predict(
        name=name,
        body={'instances': instances}
    ).execute()

    if 'error' in response:
        raise RuntimeError(response['error'])

    return response['predictions']
Then run the function with a test input: predict_json({'inputs': [[18, 87, 13, 589, 0]]})
There is now a sample demonstrating the use of Keras on CloudML engine, including prediction. You can find the sample here:
https://github.com/GoogleCloudPlatform/cloudml-samples/tree/master/census/keras
I would suggest comparing your code to that code.
Some additional suggestions that will still be relevant:
CloudML Engine currently only supports using a single signature (the default signature). Looking at your code, I think prediction_signature is more likely to lead to success, but you haven't made that the default signature. I suggest the following:
builder.add_meta_graph_and_variables(
    sess, [tf.saved_model.tag_constants.SERVING],
    signature_def_map={tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature,},
    legacy_init_op=legacy_init_op)
If you are deploying to the service, then you would invoke prediction like so:
predict_json({'images':[[18, 87, 13, 589, 0]]})
If you are testing locally using gcloud ml-engine local predict --json-instances, the input data is slightly different (it matches that of the batch prediction service). Each newline-separated line looks like this (showing a file with two lines):
{"images": [[18, 87, 13, 589, 0]]}
{"images": [[21, 85, 13, 100, 1]]}
I don't actually know enough about the shape of model.x to ensure the data being sent is correct for your model.
By way of explanation, it may be insightful to consider the difference between the Classification and Prediction methods in SavedModel. One difference is that, when using tensorflow_serving, which is based on gRPC and therefore strongly typed, Classification provides a strongly-typed signature that most classifiers can use. Then you can reuse the same client on any classifier.
That's not overly useful when using JSON, since JSON isn't strongly typed.
One other difference is that, when using tensorflow_serving, Prediction accepts column-based inputs (a map from feature name to every value for that feature in the whole batch), whereas Classification accepts row-based inputs (each input instance/example is a row).
CloudML abstracts that away a bit and always requires row-based inputs (a list of instances). Even though we only officially support Prediction, Classification should work as well.
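To make the row-based vs. column-based distinction concrete, here is a small illustrative sketch (the feature names 'age' and 'income' are made up for illustration, not from your model):
# Row-based (what CloudML expects): one dict per instance, listed in order.
row_based = {
    'instances': [
        {'age': 25, 'income': 40000},
        {'age': 42, 'income': 85000},
    ]
}

# Column-based (tensorflow_serving's Predict layout): one list per feature,
# covering the whole batch.
column_based = {
    'inputs': {
        'age': [25, 42],
        'income': [40000, 85000],
    }
}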

How do I generate binary RFC822-style headers in Python 3.2?

How do I convince email.generator.Generator to use binary in Python 3.2? This seems like precisely the use case for the policy framework that was introduced in Python 3.3, but I would like my code to run in 3.2.
from email.parser import Parser
from email.generator import Generator
from io import BytesIO, StringIO

data = "Key: \N{SNOWMAN}\r\n\r\n"
message = Parser().parse(StringIO(data))

with open("/tmp/rfc882test", "w") as out:
    Generator(out, maxheaderlen=0).flatten(message)
Fails with UnicodeEncodeError: 'ascii' codec can't encode character '\u2603' in position 0: ordinal not in range(128).
Your data is not a valid RFC 2822 header, which I suspect is misleading you. It's a Unicode string, but RFC 2822 headers are ASCII only. To have non-ASCII characters you need to encode them with a character set and either base64 or quoted-printable encoding.
Hence, valid code would be this:
from email.parser import Parser
from email.generator import Generator
from io import BytesIO, StringIO

data = "Key: =?utf8?b?4piD?=\r\n\r\n"
message = Parser().parse(StringIO(data))

with open("/tmp/rfc882test", "w") as out:
    Generator(out, maxheaderlen=0).flatten(message)
Which of course avoids the error completely.
The question is how to generate such headers as =?utf8?b?4piD?= and the answer lies in the email.header module.
I made this example with:
>>> from email import header
>>> header.Header('\N{SNOWMAN}', 'utf8').encode()
'=?utf8?b?4piD?='
To handle files that have a Key: Value format, the email module is the wrong solution. Handling such files is easy enough without the email module, and you will not have to work around the restrictions of RFC 2822. For example:
# -*- coding: UTF-8 -*-
import io
import sys

if sys.version_info > (3,):
    def u(s): return s
else:
    def u(s): return s.decode('unicode-escape')

def parse(infile):
    res = {}
    payload = ''
    for line in infile:
        key, value = line.strip().split(': ', 1)
        if key in res:
            raise ValueError(u("Key {0} appears twice").format(key))
        res[key] = value
    return res

def generate(outfile, data):
    for key in data:
        outfile.write(u("{0}: {1}\n").format(key, data[key]))

if __name__ == "__main__":
    # Ensure roundtripping:
    data = {u('Key'): u('Value'), u('Foo'): u('Bar'), u('Frötz'): u('Öpöpöp')}
    with io.open('/tmp/outfile.conf', 'wt', encoding='UTF8') as outfile:
        generate(outfile, data)
    with io.open('/tmp/outfile.conf', 'rt', encoding='UTF8') as infile:
        res = parse(infile)
    assert data == res
That code took 15 minutes to write, and works in both Python 2 and Python 3. If you want line continuations etc that's easy to add as well.
Here is a more complete one that supports comments etc.
A useful solution comes from http://mail.python.org/pipermail/python-dev/2010-October/104409.html :
from email.parser import Parser
from email.generator import BytesGenerator
# How do I get surrogateescape from a BytesIO/StringIO?
data = "Key: \N{SNOWMAN}\r\n\r\n"  # write this to headers.txt
headers = open("headers.txt", "r", encoding="ascii", errors="surrogateescape")
message = Parser().parse(headers)

with open("/tmp/rfc882test", "wb") as out:
    BytesGenerator(out, maxheaderlen=0).flatten(message)
This is for a program that wants to read and write a binary Key: value file without caring about the encoding. To consume the headers as decoded text without being able to write them back out with Generator(), Parser().parse(open("headers.txt", "r", encoding="utf-8")) should be sufficient.
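As an aside, here is a minimal sketch of the purely in-memory variant the comment in the snippet above asks about (my own addition, assuming a TextIOWrapper around a BytesIO fits your use case):
import io
from email.parser import Parser
from email.generator import BytesGenerator

raw = "Key: \N{SNOWMAN}\r\n\r\n".encode("utf-8")  # arbitrary bytes, not necessarily ASCII

# Decode with surrogateescape so undecodable bytes survive the round trip.
headers = io.TextIOWrapper(io.BytesIO(raw), encoding="ascii", errors="surrogateescape")
message = Parser().parse(headers)

out = io.BytesIO()
BytesGenerator(out, maxheaderlen=0).flatten(message)
print(out.getvalue())  # the snowman's UTF-8 bytes come back out (line endings may be normalized)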