Can we give Max & Min values statically in normalization using MinMaxScaler Sklearn?

So, I have this question and have been unable to find an answer. Below is the input POST request:
{
    "emotive_Score": [0.89, 0.57, 0.089, 0, 0.004, 0, 0],
    "sentiment_Score": [1.521894, -6.4523187],
    "mood_score": [40]
}
And I'm using the following code to scale the values.
from flask import Flask, request
from flask_restful import Resource, Api
from json import dumps
from sklearn import preprocessing
import numpy as np

class MoodScore(Resource):
    def post(self):
        json_data = request.get_json(force=True)
        if not json_data:
            return {'message': 'No input data provided'}, 400
        x = request.json['emotive_Score']
        x1 = request.json['sentiment_Score']
        x2 = request.json['mood_score']
        # Normalisation for Emotive Score
        xEmotive = np.array(x)
        PositiveEmotive = str(xEmotive[4] + xEmotive[6])
        NegativeEmotive = str(xEmotive[0] + xEmotive[1] + xEmotive[2] + xEmotive[3] + xEmotive[5])
        EmotiveScoreArray = (PositiveEmotive, NegativeEmotive)
        Nml = np.array(EmotiveScoreArray)
        float_array = Nml.astype(float)  # np.float was removed from NumPy; plain float works
        xM = float_array.reshape(-1, 1)
        minmaxscaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
        Emotive = minmaxscaler.fit_transform(xM)
        # Normalisation for Sentiment Score
        xSentiment = np.array(x1)
        PositiveSentiment = str(xSentiment[0])
        NegativeSentiment = str(xSentiment[1])
        SentimentScoreArray = (PositiveSentiment, NegativeSentiment)
        Nml1 = np.array(SentimentScoreArray)
        float_array1 = Nml1.astype(float)
        xM1 = float_array1.reshape(-1, 1)
        minmaxscaler1 = preprocessing.MinMaxScaler(feature_range=(-1, 1))
        Sentiment = minmaxscaler1.fit_transform(xM1)
        return {'PositiveEmotive': str(Emotive[0]),
                'NegativeEmotive': str(Emotive[1]),
                'PositiveSentiment': str(Sentiment[0]),
                'NegativeSentiment': str(Sentiment[1]),
                'FinalValue': str(Emotive[0] + Emotive[1] + Sentiment[0] + Sentiment[1])}
        # return {'FinalScore': str(Sentiment)}

app = Flask(__name__)
api = Api(app)
api.add_resource(MoodScore, '/moodScore')

if __name__ == '__main__':
    app.run(port='5005', host="0.0.0.0")
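For context, the endpoint can be exercised with a small client script (a sketch assuming the service is running locally on port 5005, as configured in app.run above):

import requests

payload = {
    "emotive_Score": [0.89, 0.57, 0.089, 0, 0.004, 0, 0],
    "sentiment_Score": [1.521894, -6.4523187],
    "mood_score": [40],
}
print(requests.post("http://localhost:5005/moodScore", json=payload).json())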
And I'm getting the following as output.
{
    "PositiveEmotive": "[0.]",
    "NegativeEmotive": "[1.]",
    "PositiveSentiment": "[1.]",
    "NegativeSentiment": "[-1.]",
    "FinalValue": "[1.]"
}
Since each fit_transform call sees only two values, the smaller one always maps to the bottom of the feature range and the larger one to the top, which is why the output collapses to the endpoints. I just want to know whether I can give static Min & Max values for the normalization calculation, so that I can get the desired result below:
{
    "PositiveEmotive": "[0.546]",
    "NegativeEmotive": "[1.]",
    "PositiveSentiment": "[0.598]",
    "NegativeSentiment": "[-0.6879]",
    "FinalValue": "[1.4561]"
}
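Yes, in the sense that you can fit the scaler once on fixed reference bounds and then only call transform on incoming values. A minimal sketch (the bounds below are made-up placeholders; substitute the real minimum and maximum your scores can take):

import numpy as np
from sklearn import preprocessing

# Fit once on static reference bounds rather than on each request's data.
# These bounds are hypothetical; use the actual limits of your scores.
sentiment_scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
sentiment_scaler.fit(np.array([[-10.0], [10.0]]))  # [static min], [static max]

# transform() (not fit_transform) maps new values against the fixed bounds,
# so two inputs no longer collapse onto the range endpoints.
print(sentiment_scaler.transform(np.array([[1.521894], [-6.4523187]])))

The same pattern applies to the emotive scaler with its (0, 1) range and its own fixed bounds.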

Related

Bokeh callback function with image editing not working with pyscript

I am using pyscript to take an image and process it, using a bokeh widget to make changes to the image through a callback function. The callback function works, but stops when I use Pillow to convert the image to greyscale.
Imports:
import numpy as np
from PIL import Image
import js
from js import document, console, Uint8Array, window, File, Bokeh, JSON, localStorage
from pyodide import create_proxy
import asyncio
import io
import json
from bokeh import __version__
from bokeh.document import Document
from bokeh.embed.util import OutputDocumentFor, standalone_docs_json_and_render_items
from bokeh.models import Slider, Div
from bokeh.layouts import Row
from bokeh.protocol.messages.patch_doc import process_document_events
def callback(attr, old, new):
    div.text = f'Symbol is: {new}'
    first_item = localStorage.getItem('key')
    print(first_item)
    my_image = imageToBinary(first_item)
    print(my_image)
    rs_img = convertBintoRS(my_image, 8)
    rs_img = encodeRS(rs_img, 5, 6)
    rs_img = seperate_img(rs_img, 5 + 6, 5)
    rs_img = RStoBinary(rs_img, 8)
    rs_img = binaryToImage(rs_img)
    display_image(rs_img, "reed_s")

p.on_change('value', callback)
row = Row(children=[p, div])

def imageToBinary(img):
    print('enter imageToBinary')
    img = img.convert('L')
    print('convert to greyscale')
    print(img)
    img = np.array(img)
    print('convert numpy')
    x, y = np.shape(img)
    print(x, y)
    for i in range(0, x):
        for j in range(0, y):
            if img[i][j] < 255/2:
                img[i][j] = 0
            else:
                img[i][j] = 1
    print('success')
    return img
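As an aside, the per-pixel threshold loop can be collapsed into a single vectorized NumPy comparison (a sketch; image_to_binary is just an illustrative name, with the same 0/1 behaviour as the loop above):

import numpy as np

def image_to_binary(img):
    # Same thresholding as the loop above, in one vectorized step:
    # pixels below 255/2 become 0, everything else becomes 1.
    grey = np.array(img.convert('L'))
    return (grey >= 255 / 2).astype(grey.dtype)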
I am also including some code that is relevant to the bokeh widget.
def doc_json(model, target):
    with OutputDocumentFor([model]) as doc:
        doc.title = ""
        docs_json, _ = standalone_docs_json_and_render_items(
            [model], suppress_callback_warning=False
        )
    doc_json = list(docs_json.values())[0]
    root_id = doc_json['roots']['root_ids'][0]
    return doc, json.dumps(dict(
        target_id=target,
        root_id=root_id,
        doc=doc_json,
        version=__version__,
    ))

def _link_docs(pydoc, jsdoc):
    def jssync(event):
        if getattr(event, 'setter_id', None) is not None:
            return
        events = [event]
        json_patch = jsdoc.create_json_patch_string(pyodide.ffi.to_js(events))
        pydoc.apply_json_patch(json.loads(json_patch))

    jsdoc.on_change(pyodide.ffi.create_proxy(jssync), pyodide.ffi.to_js(False))

    def pysync(event):
        json_patch, buffers = process_document_events([event], use_buffers=True)
        buffer_map = {}
        for (ref, buffer) in buffers:
            buffer_map[ref['id']] = buffer
        jsdoc.apply_json_patch(JSON.parse(json_patch), pyodide.ffi.to_js(buffer_map), setter_id='js')

    pydoc.on_change(pysync)

async def show(plot, target):
    pydoc, model_json = doc_json(plot, target)
    views = await Bokeh.embed.embed_item(JSON.parse(model_json))
    jsdoc = views[0].model.document
    _link_docs(pydoc, jsdoc)

asyncio.ensure_future(show(row, 'myplot'))
The callback function stops within imageToBinary at print('enter imageToBinary').
I know the function works outside the callback function.
I have printed the item going into localStorage and coming out of it to check that it is the same, but type() wouldn't work on it and gave an error.
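That symptom is consistent with the fact that localStorage only stores strings: first_item comes back as a str, so first_item.convert('L') inside imageToBinary fails. If the image was saved as a base64 data URL (an assumption; adjust to however it was actually serialized), it has to be decoded back into a PIL Image first. A minimal sketch, with image_from_data_url as an illustrative helper name:

import base64
import io
from PIL import Image

def image_from_data_url(data_url):
    # localStorage stores strings only, so the image must be decoded back
    # into a PIL Image before .convert('L') can be called on it.
    _header, encoded = data_url.split(",", 1)  # drop "data:image/png;base64,"
    return Image.open(io.BytesIO(base64.b64decode(encoded)))

Calling imageToBinary(image_from_data_url(first_item)) would then hand imageToBinary a real image object rather than a string.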

How to send signals/variables between a QDialog and Main Window

I am currently working on a project that involves graphing text-file data into a PyQt graph, and I have been running into problems with a subclassed QDialog box. My goal is to have the QDialog box use a combobox to choose between different data sets to graph (the code below shows the "Steering Angle" setting being chosen). The problem lies in how to make it so that when the Create Graph button (found in the QDialog class) is pressed, it runs the createGraph(self) function in the main class. I don't know how to work with classes that well, so I don't know how to make this work.
If anyone has any pointers on how to get this working, how to properly structure a PyQt program, or how to make it more efficient, I'm all ears.
Thank you for your time!
Main Window Code:
class MainWidget(QMainWindow):
    def __init__(self, parent=None):
        super(MainWidget, self).__init__(parent)
        self.activateWindow()
        self.raise_()
        self.setupGraph()
        self.dockcheck = 0
        self.graphcheck = 0
        self.setWindowTitle("Drag and Drop Test")
        self.resize(1200, 800)
        self.setAcceptDrops(True)
        self.LBLDragAndDrop = QLabel("Drag And Drop Files Here")
        self.LBLDragAndDrop.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        if self.graphcheck == 0:
            self.setCentralWidget(self.LBLDragAndDrop)
        self.path3 = "C:\\Users\\steph\\OneDrive\\Documents\\SAA Wing\\Coding\\Can Bus Data Reading\\Temporary Saves"
        self.treeview = QTreeView()
        self.treeview.setAnimated(True)
        self.fileModel = QFileSystemModel()
        self.fileModel.setRootPath(self.path3)
        self.indexRoot = self.fileModel.index(self.fileModel.rootPath())
        self.treeview.setModel(self.fileModel)
        self.treeview.setRootIndex(self.fileModel.index(self.path3))
        self.treeview.setColumnWidth(0, 250)
        self.treeview.doubleClicked.connect(self.onSelectionChanged)
        #self.treeview.doubleClicked.connect(self.openDialog)

    ####################################################################################################################
    # Dialog Box
    ####################################################################################################################
    def onSelectionChanged(self, index):
        self.selectionPath = self.sender().model().filePath(index)
        self.selectionFilename = (self.selectionPath.split("/")[-1])
        IDList = ("ID 00d0", "ID 00d1", "ID 00d3", "ID 00d4", "ID 0140", "ID 0141", "ID 0360", "ID 0361")
        if self.selectionFilename in IDList:
            if self.selectionFilename == "ID 00d0":
                editDialog = Dialog00d0()
                editDialog.exec_()

    ####################################################################################################################
    # Graphing data
    ####################################################################################################################
    def createGraph(self):
        self.graphcheck = 1
        if self.graphcheck == 1:
            self.setCentralWidget(self.scroll_area)

        ################################################################################################################
        # ID 00D0 Creating Graph
        ################################################################################################################
        if self.selectionFilename == "ID 00d0":
            self.df00d0 = pd.read_table(self.selectionPath, header=None, delim_whitespace=True, dtype=object)
            self.df00d0.columns = ['Timestamp', 'ID', "B0", "B1", "B2", "B3", "B4", "B5", "B6", "B7"]
            self.df00d0.dropna(inplace=True)
            self.SA = np.array([], dtype=float)
            self.LatAcc = np.array([], dtype=float)
            self.LonAcc = np.array([], dtype=float)
            self.ComAcc = np.array([], dtype=float)
            self.Time00d0 = np.array([], dtype=float)
            self.Timestamp00d0 = np.array([], dtype=float)

            ############################################################################################################
            # Getting Time Stamps
            ############################################################################################################
            for item in self.df00d0['Timestamp']:
                self.Time00d0 = np.append(self.Time00d0, datetime.fromtimestamp(float(item)).strftime("%H:%M:%S.%f")[:-4])
                self.Timestamp00d0 = np.append(self.Timestamp00d0, float(item))

            ############################################################################################################
            # Steering Angle Graph
            ############################################################################################################
            if self.combobox00d0.currentText() == "Steering Angle":
                SA_ = (((self.df00d0['B1']) + (self.df00d0['B0'])).apply(int, base=16) * 0.1)
                for item in SA_:
                    if item > 6000:
                        self.SA = np.append(self.SA, round((item - 6553.6), 1))
                    else:
                        self.SA = np.append(self.SA, round(item))
                y_value = self.SA
Here is the QDialog Box class code:
class Dialog00d0(QDialog):
    def __init__(self):
        super().__init__()
        self.layout = QVBoxLayout()
        hlay = QHBoxLayout()
        self.setLayout(self.layout)
        self.setWindowTitle("Create Graph")
        label = QLabel("Data Type")
        self.combobox00d0 = QComboBox()
        self.combobox00d0.addItem("Steering Angle")
        self.combobox00d0.addItem("Latitudinal Acceleration")
        self.combobox00d0.addItem("Longitudinal Acceleration")
        self.combobox00d0.addItem("Combined Acceleration")
        self.BTNCreateGraph = QPushButton("Create Graph")
        self.BTNCancel = QPushButton("Cancel")
        hlay.addWidget(self.BTNCreateGraph)
        hlay.addWidget(self.BTNCancel)
        self.layout.addWidget(label)
        self.layout.addWidget(self.combobox00d0)
        self.layout.addLayout(hlay)
        self.BTNCreateGraph.clicked.connect("I need the self.createGraph here")
        self.BTNCancel.clicked.connect("self.close")
I imagine this will help you.
The pyqtSignal() argument declares what information you want the signal to carry.
In this case, I'm passing a string.
Good luck, I hope I helped.
import sys
from PyQt5.QtWidgets import QMainWindow, QDialog, QApplication
from PyQt5.QtWidgets import QPushButton, QVBoxLayout
from PyQt5 import QtCore, QtGui

class MainWidget(QMainWindow):
    def __init__(self, parent=None):
        super(MainWidget, self).__init__(parent)
        button = QPushButton("Button to open dialog")
        button.clicked.connect(self.button_clicked)
        self.setCentralWidget(button)
        self.show()

    def button_clicked(self):
        dlg = Dialog00d0()
        dlg.signEmit.connect(self.createGraph)
        dlg.exec()

    def createGraph(self, _str):
        print('Now Im here')
        print(_str)

class Dialog00d0(QDialog):
    signEmit = QtCore.pyqtSignal(str)

    def __init__(self):
        super().__init__()
        self.layout = QVBoxLayout()
        self.BTNCreateGraph = QPushButton("link to createGraph()")
        self.layout.addWidget(self.BTNCreateGraph)
        self.setLayout(self.layout)
        self.BTNCreateGraph.clicked.connect(self.BTNCreateGraph_clicked)

    def BTNCreateGraph_clicked(self):
        self.signEmit.emit('But I passed by')

app = QApplication(sys.argv)
win = MainWidget()
app.exec()
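Applying the same idea back to the original Dialog00d0: connect() needs callables, not strings, so the two placeholder lines would become something like:

# Pass callables (not strings) to connect(); clicking Create Graph then
# emits the signal that the main window has connected to createGraph.
self.BTNCreateGraph.clicked.connect(self.BTNCreateGraph_clicked)
self.BTNCancel.clicked.connect(self.close)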

How to compare the data entered from the program and the data in MongoDB?

I want to compare the data entered from the program with the data in MongoDB. How can I do it?
import pymongo
from getpass import getpass

baglanti = pymongo.MongoClient("mongodb+srv://user:secure#denemeler.s4ufh.mongodb.net/Bankamatik?authSource=admin&w=majority&readPreference=primary&appname=MongoDB%20Compass&retryWrites=true&ssl=true")
bankamatik = baglanti["Bankamatik"]
hesap = bankamatik["BankaHesapBilgileri"]

def kayitOl():
    kullaniciadi = input("Welcome to registration. \nUsername: ")
    sifre = getpass("Password (hidden): ")
    # insert() was removed from recent pymongo versions; insert_one() is the current API
    hesap.insert_one({"kullaniciadi": kullaniciadi, "sifre": sifre, "bakiye": 1000})
    print("Your account has been created successfully.")
    print("Welcome.")

girismenu = int(input("Welcome. Type 1 to log in, or 2 to register if you do not have an account: "))
if girismenu == 2:
    kayitOl()
if girismenu == 1:
    print("You are in the login section. Please enter your details.")
    kullaniciadi = input("Username: ")
    sifre = getpass("Password (hidden): ")
    arama = hesap.find().sort("kullaniciadi")
    for x in arama:
        print(x["kullaniciadi"])
This is where I got stuck and couldn't continue.
Example:
Database: kullaniciadi: "emro", sifre: 33
Program input: kullaniciadi: "emro", sifre: 33
Login successful.
How can I do it?
Pass the filter parameters into find() or find_one()
e.g.
user_input = input("Input")
records = hesap.find({"field": user_input})
or
user_input = input("Input")
record = hesap.find_one({"field": user_input})
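Applied to the collection above, a minimal login check might look like this (a sketch matching on both fields at once; note that storing plaintext passwords this way is only acceptable in a toy project):

record = hesap.find_one({"kullaniciadi": kullaniciadi, "sifre": sifre})
if record is not None:
    print("Login successful.")
else:
    print("Username or password is incorrect.")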

Implementing K-medoids in Pyspark

I cannot find a library to use PAM (k-medoids) in PySpark.
I have found this in Scala :
https://gist.github.com/erikerlandson/c3c35f0b1aae737fc884
And this issue in Spark, which was resolved in 2016:
https://issues.apache.org/jira/browse/SPARK-4510
https://github.com/apache/spark/pull/3382
But it does not seem to be working, and it is not included in the MLlib documentation:
http://spark.apache.org/docs/2.0.0/api/python/pyspark.mllib.html#module-pyspark.mllib.clustering
Does anyone know of any library for PAM in PySpark?
Thank you.
I actually had a go at this the other day for fun. I can't say much about performance, as I'm quite new to Spark, but here is k-medoids with k++ seeding:
# (c) 2020 Jonathan Kelsey
# This code is licensed under MIT license
from pyspark.sql import functions as F
import pyspark
import numpy as np
import sys
def seed_kernel(data_broadcast, data_id_value, centeroids, k, metric):
    data = data_broadcast.value
    point = data_id_value[1]
    min_distance = sys.maxsize
    for j in range(len(centeroids)):
        distance = metric(point, data[centeroids[j]])
        min_distance = min(min_distance, distance)
    return min_distance

def seed_clusters(data_broadcast, data_frame, k, metric):
    data = data_broadcast.value
    centeroids = list(np.random.choice(data.shape[0], 1, replace=False))
    for i in range(k - 1):
        print("clusterSeed", i)
        distances = []
        mK = data_frame.rdd.map(lambda data_id_value: seed_kernel(data_broadcast, data_id_value, centeroids, k, metric))
        mK_collect = mK.collect()
        distances = np.array(mK_collect)
        next_centeroid = np.argmax(distances)
        centeroids.append(next_centeroid)
        print("centeroids", centeroids)
    return centeroids

def nearest_centeroid_kernel(data_id_value, centeroid_id_values, metric):
    _, data_value = data_id_value
    data_np = np.asarray(data_value)
    distances = []
    for _, centeroid_value in centeroid_id_values:
        centeroid_np = np.asarray(centeroid_value)
        distance = metric(data_np, centeroid_np)
        distances.append(distance)
    distances = np.asarray(distances)
    closest_centeroid = np.argmin(distances)
    return int(closest_centeroid)

def optimise_cluster_membership_spark(data, data_frame, n, metric, intital_cluster_indices=None):
    data_shape = data.shape
    data_rdd = data_frame.rdd
    data_length = data_shape[0]
    if intital_cluster_indices is None:
        index = np.random.choice(data_length, n, replace=False)
    else:
        index = intital_cluster_indices
    list_index = [int(i) for i in list(index)]
    centeroid_id_values = [(i, data[index[i]]) for i in range(len(index))]
    data_rdd = data_rdd.filter(lambda data_id_value: int(data_id_value["id"]) not in list_index)
    associated_cluster_points = data_rdd.map(lambda data_id_value: (data_id_value[0], nearest_centeroid_kernel(data_id_value, centeroid_id_values, metric)))
    clusters = associated_cluster_points.toDF(["id", "bestC"]).groupBy("bestC").agg(F.collect_list("id").alias("cluster"))
    return index, clusters

def cost_kernel(data_broadcast, test_centeroid, cluster_data, metric):
    data = data_broadcast.value
    cluster = np.asarray(cluster_data)
    cluster_length = cluster.shape[0]
    feature_length = data.shape[1]
    test_centeroid_column = np.zeros(shape=(cluster_length, feature_length), dtype=data.dtype)
    new_cluster_column = np.zeros(shape=(cluster_length, feature_length), dtype=data.dtype)
    for i in range(0, cluster_length):
        new_cluster_column[i] = data[cluster[i]]
        test_centeroid_column[i] = data[int(test_centeroid)]
    pairwise_distance = metric(new_cluster_column, test_centeroid_column)
    cost = np.sum(pairwise_distance)
    return float(cost)

def optimise_centroid_selection_spark(data_broadcast, data_frame, centeroids, clusters_frames, metric):
    data = data_broadcast.value
    new_centeroid_ids = []
    total_cost = 0
    for cluster_idx in range(len(centeroids)):
        old_centeroid = centeroids[cluster_idx]
        cluster_frame = clusters_frames.filter(clusters_frames.bestC == cluster_idx).select(F.explode(clusters_frames.cluster))
        cluster_data = cluster_frame.collect()
        if cluster_data:
            cluster_data = [cluster_data[i].col for i in range(len(cluster_data))]
        else:
            cluster_data = []
        cost_data = cluster_frame.rdd.map(lambda point_id: (point_id[0], cost_kernel(data_broadcast, point_id[0], cluster_data, metric)))
        cost = cost_data.map(lambda point_id_cost: point_id_cost[1]).sum()
        total_cost = total_cost + cost
        point_result = cost_data.sortBy(lambda point_id_cost: point_id_cost[1]).take(1)
        if point_result:
            best_point = point_result[0][0]
        else:
            best_point = old_centeroid
        new_centeroid_ids.append(best_point)
    return (new_centeroid_ids, total_cost)

def validate_metric(metric):
    if metric == "euclidean" or metric == "hamming":
        return True
    if isinstance(metric, dict) == False:
        return "Metric is not a dictionary, and not a known string 'euclidean' or 'hamming'."
    metric_keys = metric.keys()
    if "point" not in metric_keys or "vector" not in metric_keys:
        return "Metric does not contain a member function for 'point' and/or 'vector'."
    if callable(metric["point"]) == False or callable(metric["vector"]) == False:
        return "Metric.point and/or Metric.vector are not callable functions."
    if metric["point"].__code__.co_argcount != 2 and metric["vector"].__code__.co_argcount != 2:
        return "Metric.point and/or Metric.vector do not both have 2 arguments."
    return True

# pre-defined metrics
# vector metrics
def hamming_vector(stack1, stack2):
    return (stack1 != stack2).sum(axis=1)

def euclidean_vector(stack1, stack2):
    return np.sqrt(((stack2 - stack1) ** 2).sum(axis=1))

# point metrics
def hamming_point(p1, p2):
    return np.sum((p1 != p2))

def euclidean_point(p1, p2):
    return np.sqrt(np.sum((p1 - p2) ** 2))

def fit(sc, data, n_clusters=2, metric="euclidean", seeding="heuristic"):
    metric_valid = validate_metric(metric)
    if metric_valid == True:
        if metric == "euclidean":
            point_metric = euclidean_point
            vector_metric = euclidean_vector
        elif metric == "hamming":
            point_metric = hamming_point
            vector_metric = hamming_vector
        else:
            point_metric = metric["point"]
            vector_metric = metric["vector"]
    else:
        print(metric_valid)
        return
    data_np = np.asarray(data)
    data_broadcast = sc.broadcast(data_np)
    seeds = None
    data_frame = sc.parallelize(data).zipWithIndex().map(lambda xy: (xy[1], xy[0])).toDF(["id", "vector"]).cache()
    if seeding == "heuristic":
        seeds = list(seed_clusters(data_broadcast, data_frame, n_clusters, point_metric))
    last_centeroids, last_clusters = optimise_cluster_membership_spark(data_np, data_frame, n_clusters, point_metric, seeds)
    last_cost = float('inf')
    iteration = 0
    escape = False
    while not escape:
        iteration = iteration + 1
        current_centeroids, current_cost = optimise_centroid_selection_spark(data_broadcast, data_frame, last_centeroids, last_clusters, vector_metric)
        current_centeroids, current_clusters = optimise_cluster_membership_spark(data_np, data_frame, n_clusters, point_metric, current_centeroids)
        print((current_cost < last_cost, current_cost, last_cost, current_cost - last_cost))
        if current_cost < last_cost:
            print(("iteration", iteration, "cost improving...", current_cost, last_cost, current_centeroids))
            last_cost = current_cost
            last_centeroids = current_centeroids
            last_clusters = current_clusters
        else:
            print(("iteration", iteration, "cost got worse or did not improve", current_cost, last_cost))
            escape = True
    bc = last_clusters.sort("bestC", ascending=True).collect()
    unpacked_clusters = [bc[i].cluster for i in range(len(bc))]
    return (last_centeroids, unpacked_clusters)
I used some sample data from pyclustering as a sanity check:
from pyclustering.cluster import cluster_visualizer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.samples.definitions import SIMPLE_SAMPLES
sample = read_sample(FCPS_SAMPLES.SAMPLE_GOLF_BALL)
bestCentroids, bestClusters = fit(sc, sample, 9)
visualizer = cluster_visualizer()
visualizer.append_clusters(bestClusters, sample)
visualizer.show()
Your best choice is to adapt this Python implementation into Scala, so you can take advantage of RDD partitions and distributed computation.
https://github.com/letiantian/kmedoids/blob/master/kmedoids.py
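For reference, the assign-and-swap loop at the heart of any PAM-style implementation can be sketched on a single machine in plain NumPy (a generic sketch over a precomputed distance matrix, not the linked repository's exact code):

import numpy as np

def kmedoids(D, k, iters=100, seed=0):
    # D is a precomputed n x n pairwise distance matrix.
    rng = np.random.default_rng(seed)
    n = D.shape[0]
    medoids = rng.choice(n, size=k, replace=False)
    labels = np.argmin(D[:, medoids], axis=1)  # assign: nearest medoid
    for _ in range(iters):
        # Swap step: within each cluster, the member with the smallest
        # summed distance to the other members becomes the new medoid.
        new_medoids = medoids.copy()
        for c in range(k):
            members = np.where(labels == c)[0]
            if members.size:
                costs = D[np.ix_(members, members)].sum(axis=0)
                new_medoids[c] = members[np.argmin(costs)]
        if np.array_equal(new_medoids, medoids):
            break  # converged: no medoid moved
        medoids = new_medoids
        labels = np.argmin(D[:, medoids], axis=1)  # reassign
    return medoids, labels

Distributing this mainly means parallelising the assignment and cost computations across partitions, which is what the Spark implementation above does with its RDD map calls.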

Google App Engine with Python: 500 error keeps coming

I have created a Google App Engine application called Ascii Art at http://ascii-chan-1018.appspot.com/, but for some reason, sometimes when you post something it gives you this error code on the screen:
"500 Internal Server Error. The server has either erred or is incapable of performing the requested operation." Sometimes it works and sometimes it gives you this error code. I'm not sure if it's my source code or an error on Google's servers.
import os
import re
import sys
import urllib2
import random
import logging
from xml.dom import minidom
from string import letters

import webapp2
import jinja2

from google.appengine.api import memcache
from google.appengine.ext import db

template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)

art_key = db.Key.from_path('ASCIIChan', 'arts')

def console(s):
    sys.stderr.write('%s\n' % s)

IP_URL = "http://api.hostip.info/?ip="

def get_coords(ip):
    ip = "17.173.254.223"
    url = IP_URL + ip
    content = None
    try:
        content = urllib2.urlopen(url).read()
    except URLError:
        return
    if content:
        d = minidom.parseString(content)
        coords = d.getElementsByTagName("gml:coordinates")
        if coords and coords[0].childNodes[0].nodeValue:
            lon, lat = coords[0].childNodes[0].nodeValue.split(',')
            return db.GeoPt(lat, lon)

class Handler(webapp2.RequestHandler):
    def write(self, *a, **kw):
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        t = jinja_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        self.write(self.render_str(template, **kw))

GMAPS_URL = "http://maps.googleapis.com/maps/api/staticmap?size=380x263&sensor=false&"

def gmap_img(points):
    markers = '&'.join('markers=%s,%s' % (p.lat, p.lon) for p in points)
    return GMAPS_URL + markers

class Art(db.Model):
    title = db.StringProperty(required=True)
    art = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    coords = db.GeoPtProperty()

def top_arts(update=False):
    key = 'top'
    arts = memcache.get(key)
    if arts is None or update:
        logging.error("DB QUERY")
        arts = db.GqlQuery("SELECT * "
                           "FROM Art "
                           "WHERE ANCESTOR IS :1 "
                           "ORDER BY created DESC "
                           "LIMIT 10",
                           art_key)
        arts = list(arts)
        memcache.set(key, arts)
    return arts

class MainPage(Handler):
    def render_front(self, title="", art="", error=""):
        arts = top_arts()
        img_url = None
        points = filter(None, (a.coords for a in arts))
        if points:
            img_url = gmap_img(points)
        # display the image URL
        self.render("Ascii.html", title=title, art=art, error=error, arts=arts, img_url=img_url)

    def get(self):
        self.render_front()

    def post(self):
        title = self.request.get("title")
        art = self.request.get("art")
        if title and art:
            p = Art(parent=art_key, title=title, art=art)
            # look up the user's coordinates from their IP
            coords = get_coords(self.request.remote_addr)
            # if we have coordinates, add them to the art
            if coords:
                p.coords = coords
            p.put()
            # rerun the query and update the cache
            top_arts(True)
            self.redirect("/")
        else:
            error = "Invalid, are you sure you entered a title and art work?"
            self.render_front(error=error, title=title, art=art)

app = webapp2.WSGIApplication([('/', MainPage)])
Well, I'm not sure if you can consider it solving the problem, but I just removed the leftover Google Maps code. It kept complaining about the line except URLError, so after I removed the maps code it worked fine; my initial plan was to remove it anyway.
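For anyone who would rather keep the geolocation lookup: the complaint is a NameError, because get_coords catches URLError without ever importing it. In Python 2 it lives in urllib2, so the fix is a single import (a sketch; the rest of the handler stays unchanged):

# Without this import, "except URLError:" raises a NameError the moment the
# HTTP lookup actually fails, which surfaces as the intermittent 500 error.
from urllib2 import URLError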