Django: fetch reverse-related models with a single SQL query (select_related)

I have 2 models:
class Restaurant(models.Model):
    serves_hot_dogs = models.BooleanField(default=False)
    serves_pizza = models.BooleanField(default=False)

    def __str__(self):  # __unicode__ on Python 2
        # `place` is a related Place model (not shown in this excerpt)
        return "%s the restaurant" % self.place.name

class Waiter(models.Model):
    restaurant = models.ForeignKey(Restaurant, on_delete=models.CASCADE)
    name = models.CharField(max_length=50)

    def __str__(self):  # __unicode__ on Python 2
        return "%s the waiter at %s" % (self.name, self.restaurant)
If I want to get all waiters together with their restaurants in a single query (using a JOIN), I write:
w = Waiter.objects.select_related().all()
or
w = Waiter.objects.select_related().get(pk=1)
But how can I get a restaurant together with all of its waiters? I tried
r = Restaurant.objects.select_related().get(pk=1)
but it didn't work.
I do not want to do this with two database queries, like:
r = Restaurant.objects.get(pk=1)
w = r.waiter_set.all()
Is it possible to get all of this information with a single query?
Thank you
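For reference, select_related only follows forward foreign-key and one-to-one relations, so it cannot pull in a reverse set like waiter_set. A sketch of the two usual workarounds (not from the original thread; names match the models above):

    # Single JOINed query, but starting from the Waiter side:
    waiters = Waiter.objects.select_related('restaurant').filter(restaurant__pk=1)

    # Or keep Restaurant as the entry point with prefetch_related; note this
    # issues a second query internally rather than a SQL JOIN:
    r = Restaurant.objects.prefetch_related('waiter_set').get(pk=1)
    w = r.waiter_set.all()  # served from the prefetch cache, no extra query here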

Related

Code trains too slowly: 3,500,000 rows of input from 'data.csv' and the system hangs; even after 24 hours there is no output

I am trying to return the category of input data. The training data is 'data.csv', which has 3,500,000 rows, each a sentence and its class.
import nltk
from nltk.stem.lancaster import LancasterStemmer
import os
import csv
import json
import datetime

stemmer = LancasterStemmer()

training_data = []
with open('data.csv') as f:
    training_data = [{k: str(v) for k, v in row.items()}
                     for row in csv.DictReader(f, skipinitialspace=True)]

words = []
classes = []
documents = []
ignore_words = ['?', '.', '_', '-']  # words to be ignored in input data file

for pattern in training_data:
    w = nltk.word_tokenize(pattern['sentence'])
    words.extend(w)
    documents.append((w, pattern['class']))
    if pattern['class'] not in classes:
        classes.append(pattern['class'])

words = [stemmer.stem(a.lower()) for a in words if a not in ignore_words]
words = list(set(words))  # remove duplicates
classes = list(set(classes))
# create our training data
training = []
output = []
output_empty = [0] * len(classes)

for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    for w in words:
        bag.append(1 if w in pattern_words else 0)
    training.append(bag)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    output.append(output_row)
import numpy as np
import time

def sigmoid(x):
    output = 1/(1+np.exp(-x))
    return output

def sigmoid_output_to_derivative(output):
    return output*(1-output)

def clean_up_sentence(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words

def bow(sentence, words, show_details=False):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words
    bag = [0]*len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
    return np.array(bag)

# returns the calculated value of the output after multiplying with the sigmoids
def think(sentence, show_details=False):
    x = bow(sentence.lower(), words, show_details)
    # input layer is our bag of words
    l0 = x
    # matrix multiplication of input and hidden layer
    # (synapse_0 / synapse_1 are the trained weight matrices; the training step is not shown in this excerpt)
    l1 = sigmoid(np.dot(l0, synapse_0))
    # output layer
    l2 = sigmoid(np.dot(l1, synapse_1))
    return l2
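Not part of the original post, but one likely hotspot with 3,500,000 rows is the bag-of-words loop above: for every document it scans the whole vocabulary and, for each vocabulary word, scans the document's token list. A sketch of the same step using a word-to-index dictionary and a set, assuming words, documents, classes, and output_empty are built exactly as above:

    word_index = {w: i for i, w in enumerate(words)}  # O(1) lookups instead of scanning a list

    training = []
    output = []
    for pattern_words, cls in documents:
        stemmed = {stemmer.stem(w.lower()) for w in pattern_words}  # set membership is O(1)
        bag = [0] * len(words)
        for w in stemmed:
            i = word_index.get(w)
            if i is not None:
                bag[i] = 1
        training.append(bag)
        output_row = list(output_empty)
        output_row[classes.index(cls)] = 1
        output.append(output_row)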

How to get the same initial results when a seed is provided, without restarting the IPython kernel, in TensorFlow

I am not sure whether this question makes sense given the design of TensorFlow. Here is the code:
import numpy as np
import tensorflow as tf

np.random.seed(0)
tf.set_random_seed(0)

class Sample():
    def __init__(self, hidden_dim=50, input_dim=784):
        self.hidden_dim = hidden_dim
        self.input_dim = input_dim
        self.x = tf.placeholder(tf.float32, [None, self.input_dim])
        self._create_network()
        self.__minimize()
        self.sess = tf.InteractiveSession()
        init = tf.initialize_all_variables()
        self.sess.run(init)

    def _create_network(self):
        self.W1 = tf.Variable(tf.random_normal([self.input_dim, self.hidden_dim]))
        self.W2 = tf.Variable(tf.random_normal([self.hidden_dim, self.input_dim]))

    def __minimize(self):
        h1 = tf.matmul(self.x, self.W1)
        h2 = tf.matmul(h1, self.W2)
        reconstruction = tf.nn.sigmoid(h2)
        self.loss = tf.reduce_mean(tf.squared_difference(self.x, reconstruction))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(self.loss)

    def partial_fit(self, X):
        cost, _ = self.sess.run([self.loss, self.optimizer], feed_dict={self.x: X})
        return cost

import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
n_samples = mnist.train.num_examples

ex_1 = mnist.train.next_batch(1)[0]
model = Sample()
for i in xrange(11):
    c = model.partial_fit(ex_1)
    print c
The result is as follows:
0.498799
0.469001
0.449659
0.436665
0.424995
0.414473
0.404129
0.394458
0.39165
0.38483
0.380042
This result is achieved with seed 0, and it is the same when I restart the kernel. But suppose I have run 10 iterations and then want to start from scratch: how do I do that in IPython? Because if I run again after 10 or so iterations, the model continues from where it left off.
I used tf.reset_default_graph(), but that did not change the behavior.
Don't use an InteractiveSession; use a normal Session.
Create a new Session each time with the same seed and you will get the same results.
graph = tf.Graph()
with graph.as_default():
    model = Sample()

with tf.Session(graph=graph) as sess:
    np.random.seed(0)
    tf.set_random_seed(0)
    # (this assumes Sample is adapted to use this session rather than
    #  creating its own InteractiveSession in __init__)
    for i in xrange(11):
        c = model.partial_fit(ex_1)
        print c
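Not from the original answer: another way to start over inside the same IPython session is to discard the graph and rebuild the model; the detail that is easy to miss is that tf.set_random_seed applies to the current default graph, so it has to be called again after tf.reset_default_graph(). A sketch, assuming the Sample class and ex_1 defined above:

    def fresh_model(old_model=None, seed=0):
        if old_model is not None:
            old_model.sess.close()   # release the previous session first
        tf.reset_default_graph()     # drop the old graph and its variables
        np.random.seed(seed)
        tf.set_random_seed(seed)     # must be re-applied to the *new* default graph
        return Sample()              # rebuilds the graph and re-initializes the weights

    model = fresh_model()
    for i in range(11):
        print(model.partial_fit(ex_1))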

F# SQLProvider: column order doesn't match the order in the table

I select from a PostgreSQL view/table and export the values into an Excel file.
The Excel file's column order needs to be the same as the table's, but SqlProvider selects them in alphabetical order...
My code is:
module ViewToExcel

open System
open System.IO
//open Microsoft.Office.Interop.Excel
open System.Drawing
open Npgsql
open FSharp.Data.Sql
open OfficeOpenXml
open Casaubon
open NpgsqlTypes

let [<Literal>] connectionString = @"Server=localhost;Database=db;User Id=postgres;Password=;"
let [<Literal>] npgPath = @"..\packages\Npgsql.3.1.7\lib\net451"

type sqlConnection = SqlDataProvider<ConnectionString = connectionString,
                                     DatabaseVendor = Common.DatabaseProviderTypes.POSTGRESQL,
                                     ResolutionPath = npgPath,
                                     IndividualsAmount = 1000,
                                     UseOptionTypes = true>

let functionParseViewToExcel (excelPath:string, serverName:string, dbName:string) =
    /////////////////////////////////Get Data Connection///////////////////////
    printf "connect to db\n"
    let connectionUserString = @"Server=" + serverName + ";Database=" + dbName + ";User Id=postgres;Password=;"
    let ctx = sqlConnection.GetDataContext(connectionUserString)
    let weekCalcView = ctx.Public.CcVibeWeeklyCalculations
    // weekCalcView |> Seq.toList
    let weekCalcViewSeq = ctx.Public.CcVibeWeeklyCalculations |> Seq.toArray
    ////////////////////////////////// Start Excel//////////////////////////////
    let newExcelFile = FileInfo(excelPath + "cc_vibe_treatment_period_" + DateTime.Today.ToString("yyyy_dd_MM") + ".xlsx")
    if (newExcelFile.Exists) then
        newExcelFile.Delete()
    let pck = new ExcelPackage(newExcelFile)
    //Add the 'xxx' sheet
    let ws = pck.Workbook.Worksheets.Add("xxx")
    //printf "success to start the excel file\n"
    let mutable columNames = "blabla"
    for col in weekCalcViewSeq.[0].ColumnValues do
        let columnName = match col with | (a, _) -> a
        //printf "a %A\n" columnName
        let columnNamewithPsic = "," + columnName
        columNames <- columNames + columnNamewithPsic
    ws.Cells.[1, 1].LoadFromText(columNames.Replace("blabla,", "")) |> ignore
    ws.Row(1).Style.Fill.PatternType <- Style.ExcelFillStyle.Solid
    ws.Row(1).Style.Fill.BackgroundColor.SetColor(Color.FromArgb(170, 170, 170))
    ws.Row(1).Style.Font.Bold <- true
    ws.Row(1).Style.Font.UnderLine <- true
    let mutable subject = weekCalcViewSeq.[0].StudySubjectLabel.Value // in order to color the rows according to subjects
    let mutable color = 0
    for row in 1 .. weekCalcViewSeq.Length do
        let mutable columValues = "blabla"
        for col in weekCalcViewSeq.[row - 1].ColumnValues do
            let columnValue = match col with | (_, a) -> a
            //printf "a %A\n" columnValue
            match columnValue with
            | null -> columValues <- columValues + "," + ""
            | _ -> columValues <- columValues + "," + columnValue.ToString()
        ws.Cells.[row + 1, 1].LoadFromText(columValues.Replace("blabla,", "")) |> ignore
        /////////////////////Color the row according to subject///////////////
        if (weekCalcViewSeq.[row - 1].StudySubjectLabel.Value = subject) then
            if (color = 0) then
                ws.Row(row + 1).Style.Fill.PatternType <- Style.ExcelFillStyle.Solid
                ws.Row(row + 1).Style.Fill.BackgroundColor.SetColor(Color.FromArgb(255, 255, 204))
            else
                ws.Row(row + 1).Style.Fill.PatternType <- Style.ExcelFillStyle.Solid
                ws.Row(row + 1).Style.Fill.BackgroundColor.SetColor(Color.White)
        else
            subject <- weekCalcViewSeq.[row - 1].StudySubjectLabel.Value
            if (color = 0) then
                color <- 1
                ws.Row(row + 1).Style.Fill.PatternType <- Style.ExcelFillStyle.Solid
                ws.Row(row + 1).Style.Fill.BackgroundColor.SetColor(Color.White)
            else
                color <- 0
                ws.Row(row + 1).Style.Fill.PatternType <- Style.ExcelFillStyle.Solid
                ws.Row(row + 1).Style.Fill.BackgroundColor.SetColor(Color.FromArgb(255, 255, 204))
    pck.Save()
The Excel output fields are:
bloating_avg,caps_fail,caps_success,date_of_baseline_visit,discomfort_avg and etc...
But the order in the table isn't the same.
Could someone help me?
Thanks!
You can write a small helper function to extract the field (column) names via Npgsql. After that you can just use this list of column names to create your Excel table. The getColNames function gets them from a DataReader. Obviously you can refactor it further, to take the table name as a parameter, etc.
#r #"..\packages\SQLProvider.1.0.33\lib\FSharp.Data.SqlProvider.dll"
#r #"..\packages\Npgsql.3.1.7\lib\net451\Npgsql.dll"
open System
open FSharp.Data.Sql
open Npgsql
open NpgsqlTypes
let conn = new NpgsqlConnection("Host=localhost;Username=postgres;Password=root;Database=postgres;Pooling=false")
conn.Open()
let cmd = new NpgsqlCommand()
cmd.Connection <- conn
cmd.CommandText <- """ SELECT * FROM public."TestTable1" """
let recs = cmd.ExecuteReader()
let getColNames (recs:NpgsqlDataReader) =
let columns = recs.GetColumnSchema() |> Seq.toList
columns |> List.map (fun x -> x.BaseColumnName)
let colnames = getColNames recs
//val colnames : string list = ["ID"; "DT"; "ADAY"]
rec.Dispose()
conn.Dispose()
You can see that the column names are not in alphabetical order. You could use this column name list to get at the records in the correct order. Or just use the Reader object directly without the type provider.
Edit: Using records to map the table
It is also possible to extract the data, using the type provider, in the required format, by wiring up the types, and then using .MapTo<T>:
type DataRec = {
    DT:DateTime
    ADAY:String
    ID:System.Int64
}

type sql = SqlDataProvider<dbVendor, connString2, "", resPath, indivAmount, useOptTypes>
let ctx = sql.GetDataContext()
let table1 = ctx.Public.TestTable1

let qry = query { for row in table1 do
                  select row } |> Seq.map (fun x -> x.MapTo<DataRec>())

qry |> Seq.toList
val it : DataRec list = [{DT = 2016/09/27 00:00:00;
ADAY = "Tuesday";
ID = 8L;}; {DT = 2016/09/26 00:00:00;
ADAY = "Monday";
ID = 9L;}; {DT = 2016/09/25 00:00:00;
ADAY = "Sunday";

Confusion with classes and global variables

I've come to a halt in my first project. I'm trying to make a timecard program. I decided to use a class object to handle the variables locally, but I can't figure out how to create a class object from user input.
import time
import datetime
import sqlite3

class Employee(object):
    def __init__(self, name, position, wage=0, totalpay=0, totalhours=0):
        self.name = name
        self.position = position
        self.wage = wage
        self.totalpay = totalpay
        self.totalhours = totalhours

    def HourlyPay(self):
        if self.position not in range(1, 4):
            return "%s is not a valid position" % self.position
        elif self.position == 1:
            self.wage = 105.00
        elif self.position == 2:
            self.wage = 112.50
        elif self.position == 3:
            self.wage = 118.50
        return "%s at position %i is making %i DKK per hour" % (self.name, self.position, self.wage)

    def Salary(self, hours):
        self.hours = hours
        self.totalpay += self.wage * self.hours
        self.totalhours += self.hours
        return "%s next salary will be %i DKK" % (self.name, self.totalpay)

# This is our Employee object
EmployeeObj = Employee('John Doe', 1)  # Our Employee object
EmployeeObj.HourlyPay()
EmployeeObj.Salary(43)  # Takes 'hours' as argument

# Temporary Database config and functions below
conn = sqlite3.connect('database.db')
c = conn.cursor()

# For setting up the database tables: name, position and total.
def Create_table():
    c.execute('CREATE TABLE IF NOT EXISTS EmployeeDb(name TEXT, position INTEGER, total REAL)')

# Run to update values given by our Employee object
def Data_entry():
    name = str(EmployeeObj.name)
    position = int(EmployeeObj.position)
    total = float(EmployeeObj.totalpay)
    c.execute('INSERT INTO EmployeeDb (name, position, total) VALUES (?, ?, ?)',
              (name, position, total))
    conn.commit()
    c.close()
    conn.close()
    return True
What I'm trying to achieve is to create this variable from user input:
EmployeeObj = Employee('John Doe', 1) # Our Employee object
Maybe you can do something like this:
name = input("Enter employee name:")
position = int(input("Enter employee position:"))
EmployeeObj = Employee(name, position)
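If the position has to be one of 1-3 (as HourlyPay expects), a small validation loop around the conversion may help; a sketch, not from the original answer, reusing the Employee class above:

    def employee_from_input():
        name = input("Enter employee name: ")
        while True:
            try:
                position = int(input("Enter employee position (1-3): "))
                if position in range(1, 4):
                    return Employee(name, position)
            except ValueError:
                pass  # non-numeric input falls through to the retry message
            print("Please enter a whole number between 1 and 3.")

    EmployeeObj = employee_from_input()
    print(EmployeeObj.HourlyPay())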

How to predict in pycaffe?

I have a model that has been trained on CIFAR-10, but I don't understand how to make a prediction in pycaffe.
I get an image from LMDB, but I don't know how to feed it to the net and get the predicted class.
My code:
net = caffe.Net('acc81/model.prototxt',
'acc81/cifar10_full_iter_70000.caffemodel.h5',
caffe.TEST)
lmdb_env = lmdb.open('cifar10_test_lmdb/')
lmdb_txn = lmdb_env.begin()
lmdb_cursor = lmdb_txn.cursor()
for key, value in lmdb_cursor:
datum = caffe.proto.caffe_pb2.Datum()
datum.ParseFromString(value)
image = caffe.io.datum_to_array(datum)
image = image.astype(np.uint8)
# What's next with the image variable?
# If i try:
# out = net.forward_all(data=np.asarray([image]))
# I get Exception: Input blob arguments do not match net inputs.
print("Image class is " + label)
Use this Python script:
# Run the script with anaconda-python
# $ /home/<path to anaconda directory>/anaconda/bin/python LmdbClassification.py
import sys
import numpy as np
import lmdb
import caffe
from collections import defaultdict

caffe.set_mode_gpu()

# Modify the paths given below
deploy_prototxt_file_path = '/home/<username>/caffe/examples/cifar10/cifar10_deploy.prototxt'  # Network definition file
caffe_model_file_path = '/home/<username>/caffe/examples/cifar10/cifar10_iter_5000.caffemodel'  # Trained Caffe model file
test_lmdb_path = '/home/<username>/caffe/examples/cifar10/cifar10_test_lmdb/'  # Test LMDB database path
mean_file_binaryproto = '/home/<username>/caffe/examples/cifar10/mean.binaryproto'  # Mean image file

# Extract mean from the mean image file
mean_blobproto_new = caffe.proto.caffe_pb2.BlobProto()
f = open(mean_file_binaryproto, 'rb')
mean_blobproto_new.ParseFromString(f.read())
mean_image = caffe.io.blobproto_to_array(mean_blobproto_new)
f.close()

# CNN reconstruction and loading the trained weights
net = caffe.Net(deploy_prototxt_file_path, caffe_model_file_path, caffe.TEST)

count = 0
correct = 0
matrix = defaultdict(int)  # (real, pred) -> int
labels_set = set()

lmdb_env = lmdb.open(test_lmdb_path)
lmdb_txn = lmdb_env.begin()
lmdb_cursor = lmdb_txn.cursor()

for key, value in lmdb_cursor:
    datum = caffe.proto.caffe_pb2.Datum()
    datum.ParseFromString(value)
    label = int(datum.label)
    image = caffe.io.datum_to_array(datum)
    image = image.astype(np.uint8)
    out = net.forward_all(data=np.asarray([image]) - mean_image)
    plabel = int(out['prob'][0].argmax(axis=0))
    count += 1
    iscorrect = label == plabel
    correct += (1 if iscorrect else 0)
    matrix[(label, plabel)] += 1
    labels_set.update([label, plabel])
    if not iscorrect:
        print("\rError: key = %s, expected %i but predicted %i" % (key, label, plabel))
    sys.stdout.write("\rAccuracy: %.1f%%" % (100. * correct / count))
    sys.stdout.flush()

print("\n" + str(correct) + " out of " + str(count) + " were classified correctly")
print("")
print("Confusion matrix:")
print("(r , p) | count")
for l in labels_set:
    for pl in labels_set:
        print("(%i , %i) | %i" % (l, pl, matrix[(l, pl)]))