Getting an infeasible solution for a flexible jobshop problem - or-tools

I want to discuss an issue I am running into while using Google OR-Tools.
Problem: the solver status comes back INFEASIBLE, but the problem is feasible as far as I understand it.
Below is the sample.py I used:
import collections
from ortools.sat.python import cp_model
class SolutionPrinter(cp_model.CpSolverSolutionCallback):
    """Print intermediate solutions."""

    def __init__(self):
        cp_model.CpSolverSolutionCallback.__init__(self)
        self.__solution_count = 0

    def on_solution_callback(self):
        """Called at each new solution."""
        print('Solution %i, time = %f s, objective = %i' %
              (self.__solution_count, self.WallTime(), self.ObjectiveValue()))
        self.__solution_count += 1


def flexible_jobshop():
    """Solve a small flexible jobshop problem."""
    # Data part.
    jobs = [
        [  # Job 0 (machine_id, processingtime, starttime, endtime)
            [(0, 65, 0, 72), (1, 65, 0, 72), (2, 65, 0, 72)],       # task 0 with 3 alternatives
            [(3, 170, 0, 240), (4, 170, 0, 240), (5, 170, 0, 240)]  # task 1 with 3 alternatives
        ],
        [  # Job 1
            [(0, 25, 0, 120), (1, 25, 0, 120), (2, 25, 0, 120)],
            [(3, 168, 0, 288), (4, 168, 0, 288), (5, 168, 0, 288)]
        ],
        [  # Job 2
            [(0, 34, 0, 480), (1, 34, 0, 480), (2, 34, 0, 480)],
            [(3, 32, 0, 504), (4, 32, 0, 504), (5, 32, 0, 504)]
        ],
        [  # Job 3
            [(0, 39, 0, 600), (1, 39, 0, 600), (2, 39, 0, 600)],
            [(3, 93, 0, 696), (4, 93, 0, 696), (5, 93, 0, 696)]
        ]
    ]
    # from data import Data
    # jobs = Data().p3
    # print(jobs)

    num_jobs = len(jobs)
    all_jobs = range(num_jobs)
    num_machines = 3
    all_machines = range(num_machines)

    # Model the flexible jobshop problem.
    model = cp_model.CpModel()

    horizon = 0
    for job in jobs:
        for task in job:
            max_task_duration = 0
            for alternative in task:
                max_task_duration = max(max_task_duration, alternative[1])
            horizon += max_task_duration
    print('Horizon = %i' % horizon)

    # Global storage of variables.
    intervals_per_resources = collections.defaultdict(list)
    starts = {}     # indexed by (job_id, task_id).
    presences = {}  # indexed by (job_id, task_id, alt_id).
    job_ends = []

    # Scan the jobs and create the relevant variables and intervals.
    for job_id in all_jobs:
        job = jobs[job_id]
        num_tasks = len(job)
        previous_end = None
        for task_id in range(num_tasks):
            task = job[task_id]

            min_duration = task[0][1]
            max_duration = task[0][1]

            num_alternatives = len(task)
            all_alternatives = range(num_alternatives)

            for alt_id in range(1, num_alternatives):
                alt_duration = task[alt_id][1]
                min_duration = min(min_duration, alt_duration)
                max_duration = max(max_duration, alt_duration)

            # Create main interval for the task.
            suffix_name = '_j%i_t%i' % (job_id, task_id)
            # start = model.NewIntVar(task[0][2], horizon, 'start' + suffix_name)
            start = model.NewIntVar(0, horizon, 'start' + suffix_name)
            duration = model.NewIntVar(min_duration, max_duration,
                                       'duration' + suffix_name)
            end = model.NewIntVar(0, horizon, 'end' + suffix_name)
            # end = model.NewIntVar(task[0][3], horizon, 'end' + suffix_name)
            interval = model.NewIntervalVar(start, duration, end,
                                            'interval' + suffix_name)

            # Store the start for the solution.
            starts[(job_id, task_id)] = start

            # Add precedence with previous task in the same job.
            if previous_end is not None:
                model.Add(start >= previous_end)
            previous_end = end

            # Create alternative intervals.
            if num_alternatives > 1:
                l_presences = []
                for alt_id in all_alternatives:
                    alt_suffix = '_j%i_t%i_a%i' % (job_id, task_id, alt_id)
                    l_presence = model.NewBoolVar('presence' + alt_suffix)
                    l_start = model.NewIntVar(0, horizon, 'start' + alt_suffix)
                    l_duration = task[alt_id][0]
                    l_end = model.NewIntVar(0, horizon, 'end' + alt_suffix)
                    l_interval = model.NewOptionalIntervalVar(
                        l_start, l_duration, l_end, l_presence,
                        'interval' + alt_suffix)
                    l_presences.append(l_presence)

                    # Link the master variables with the local ones.
                    model.Add(start == l_start).OnlyEnforceIf(l_presence)
                    model.Add(duration == l_duration).OnlyEnforceIf(l_presence)
                    model.Add(end == l_end).OnlyEnforceIf(l_presence)

                    # Add the local interval to the right machine.
                    intervals_per_resources[task[alt_id][1]].append(l_interval)

                    # Store the presences for the solution.
                    presences[(job_id, task_id, alt_id)] = l_presence

                # Select exactly one presence variable.
                model.AddExactlyOne(l_presences)
            else:
                intervals_per_resources[task[0][1]].append(interval)
                presences[(job_id, task_id, 0)] = model.NewConstant(1)

        job_ends.append(previous_end)

    # Create machines constraints.
    for machine_id in all_machines:
        intervals = intervals_per_resources[machine_id]
        if len(intervals) > 1:
            model.AddNoOverlap(intervals)

    # Makespan objective
    makespan = model.NewIntVar(0, horizon, 'makespan')
    model.AddMaxEquality(makespan, job_ends)
    model.Minimize(makespan)

    # Solve model.
    solver = cp_model.CpSolver()
    solution_printer = SolutionPrinter()
    status = solver.Solve(model, solution_printer)

    # Print final solution.
    for job_id in all_jobs:
        print('Job %i:' % job_id)
        for task_id in range(len(jobs[job_id])):
            start_value = solver.Value(starts[(job_id, task_id)])
            machine = -1
            duration = -1
            selected = -1
            for alt_id in range(len(jobs[job_id][task_id])):
                if solver.Value(presences[(job_id, task_id, alt_id)]):
                    duration = jobs[job_id][task_id][alt_id][0]
                    machine = jobs[job_id][task_id][alt_id][1]
                    selected = alt_id
            print(
                ' task_%i_%i starts at %i (alt %i, machine %i, duration %i)' %
                (job_id, task_id, start_value, selected, machine, duration))

    print('Solve status: %s' % solver.StatusName(status))
    print('Optimal objective value: %i' % solver.ObjectiveValue())
    print('Statistics')
    print(' - conflicts : %i' % solver.NumConflicts())
    print(' - branches : %i' % solver.NumBranches())
    print(' - wall time : %f s' % solver.WallTime())


flexible_jobshop()
For this data set, the solver reports INFEASIBLE.
I am not sure what I am doing wrong here.
Please guide me.
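One way to get more information out of the solver while debugging (a minimal sketch; Validate() and log_search_progress are part of the CP-SAT Python API, the snippet itself is illustrative and not part of sample.py):

# Illustrative sketch: validate the model and enable the CP-SAT search log.
error = model.Validate()  # returns a non-empty string if the model proto is invalid
if error:
    print('Model validation error:', error)

solver = cp_model.CpSolver()
solver.parameters.log_search_progress = True  # print presolve and search statistics
status = solver.Solve(model)
print('Status:', solver.StatusName(status))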

Related

Polars DataFrame: Apply MinMaxScaler to a column with a condition

I am trying to perform the following operation in Polars.
Values in column B below 80 should be scaled between 1 and 4, whereas anything at or above 80 should be set to 5.
df_pandas = pd.DataFrame(
    {
        "A": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        "B": [50, 300, 80, 12, 105, 78, 66, 42, 61.5, 35],
    }
)
test_scaler = MinMaxScaler(feature_range=(1, 4))  # from sklearn.preprocessing
df_pandas.loc[df_pandas['B'] < 80, 'Test'] = test_scaler.fit_transform(
    df_pandas.loc[df_pandas['B'] < 80, "B"].values.reshape(-1, 1))
df_pandas = df_pandas.fillna(5)
This is what I did with Polars:
# dt is a dictionary
dt = df.filter(
    pl.col('B') < 80
).to_dict(as_series=False)
below_80 = list(dt.keys())
dt_scale = list(
    test_scaler.fit_transform(
        np.array(dt['B']).reshape(-1, 1)
    ).reshape(-1)  # reshape back to one dimensional
)
# reassign to dictionary dt
dt['B'] = dt_scale
dt_scale_df = pl.DataFrame(dt)
dt_scale_df
dummy = df.join(
    dt_scale_df, how="left", on="A"
).fill_null(5)
dummy = dummy.rename({"B_right": "Test"})
Result:
A     B      Test
1     50.0   2.727273
2     300.0  5.000000
3     80.0   5.000000
4     12.0   1.000000
5     105.0  5.000000
6     78.0   4.000000
7     66.0   3.454545
8     42.0   2.363636
9     61.5   3.250000
10    35.0   2.045455
Is there a better approach for this?
Alright, I have got 3 examples for you that should help, of which the last should be preferred.
Because you only want to apply your scaler to a part of a column, we should ensure we only send that part of the data to the scaler. This can be done by:
window function over a partition
partition_by
when -> then -> otherwise + min_max expression
Window function over partition
This requires a Python function that is applied over the partitions. Inside the function we then check which partition we are in and deal with it accordingly.
df = pl.from_pandas(df_pandas)
min_max_sc = MinMaxScaler((1, 4))

def my_scaler(s: pl.Series) -> pl.Series:
    if s.len() > 0 and s[0] > 80:
        out = (s * 0 + 5)
    else:
        out = pl.Series(min_max_sc.fit_transform(s.to_numpy().reshape(-1, 1)).flatten())
    # ensure all types are the same
    return out.cast(pl.Float64)

df.with_column(
    pl.col("B").apply(my_scaler).over(pl.col("B") < 80).alias("Test")
)
partition_by
This partitions the original dataframe into a dictionary holding the different partitions. We then modify only the partitions as needed.
parts = (df
         .with_column((pl.col("B") < 80).alias("part"))
         .partition_by("part", as_dict=True)
         )
parts[True] = parts[True].with_column(
    pl.col("B").map(
        lambda s: pl.Series(min_max_sc.fit_transform(s.to_numpy().reshape(-1, 1)).flatten())
    ).alias("Test")
)
parts[False] = parts[False].with_column(
    pl.lit(5.0).alias("Test")
)
pl.concat([df for df in parts.values()]).select(pl.all().exclude("part"))
when -> then -> otherwise + min_max expression
This one I like best. We can write a function that creates a Polars expression implementing the min-max scaling you need. This will have the best performance.
def min_max_scaler(col: str, predicate: pl.Expr):
    x = pl.col(col)
    x_min = x.filter(predicate).min()
    x_max = x.filter(predicate).max()
    # * 3 + 1 to set scale between 1 - 4
    return (x - x_min) / (x_max - x_min) * 3 + 1

predicate = pl.col("B") < 80
df.with_column(
    pl.when(predicate)
    .then(min_max_scaler("B", predicate))
    .otherwise(5).alias("Test")
)
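Incidentally, the * 3 + 1 hard-codes the 1-4 target range. The same expression generalizes to (x - x_min) / (x_max - x_min) * (hi - lo) + lo; here is a small sketch of that generalization (the lo/hi parameters and the function name are mine, not part of the answer above):

def min_max_scaler_to_range(col: str, predicate: pl.Expr, lo: float = 1.0, hi: float = 4.0) -> pl.Expr:
    # Linearly rescale the values selected by `predicate` into [lo, hi].
    x = pl.col(col)
    x_min = x.filter(predicate).min()
    x_max = x.filter(predicate).max()
    return (x - x_min) / (x_max - x_min) * (hi - lo) + lo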

TensorFlow: how to solve the problem of memory explosion

I am reproducing the work of a predecessor: https://github.com/sharathadavanne/seld-net. The source code is designed for CPU. Now I want to run this code on GPU, but I run into a memory explosion problem. I'm a beginner with TensorFlow and don't know what to do now. Can anyone give me some advice?
#
# A wrapper script that trains the SELDnet and SELD-TCN.
# The training stops when the SELD error (check paper) stops improving.
#
import gc
import tensorflow
import os
import sys
import numpy as np
import matplotlib.pyplot as plot
import cls_data_generator
import evaluation_metrics
import keras_model
import parameter
import utils
import time
plot.switch_backend('agg')
def set_gpu():
    """GPU-related settings."""
    # Log which device each variable is placed on
    # tf.debugging.set_log_device_placement(True)
    # Get the number of physical GPUs
    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
    print('Number of physical GPUs:', len(gpus))
    # Enable GPU memory growth
    for gpu in gpus:
        tensorflow.config.experimental.set_memory_growth(gpu, True)
    print('------------- GPU memory growth has been enabled --------------')
    # Get the number of logical GPUs
    logical_gpus = tensorflow.config.experimental.list_logical_devices('GPU')
    print('Number of logical GPUs:', len(logical_gpus))
def collect_test_labels(_data_gen_test, _data_out, classification_mode, quick_test):
    # Collecting ground truth for test data
    nb_batch = 2 if quick_test else _data_gen_test.get_total_batches_in_data()
    batch_size = _data_out[0][0]
    gt_sed = np.zeros((nb_batch * batch_size, _data_out[0][1], _data_out[0][2]))
    gt_doa = np.zeros((nb_batch * batch_size, _data_out[0][1], _data_out[1][2]))
    print("nb_batch in test: {}".format(nb_batch))
    cnt = 0
    for tmp_feat, tmp_label in _data_gen_test.generate():
        gt_sed[cnt * batch_size:(cnt + 1) * batch_size, :, :] = tmp_label[0]
        gt_doa[cnt * batch_size:(cnt + 1) * batch_size, :, :] = tmp_label[1]
        cnt = cnt + 1
        if cnt == nb_batch:
            break
    return gt_sed.astype(int), gt_doa


def plot_functions(fig_name, _tr_loss, _val_loss, _sed_loss, _doa_loss, _epoch_metric_loss):
    plot.figure()
    nb_epoch = len(_tr_loss)
    plot.subplot(311)
    plot.plot(range(nb_epoch), _tr_loss, label='train loss')
    plot.plot(range(nb_epoch), _val_loss, label='val loss')
    plot.legend()
    plot.grid(True)
    plot.subplot(312)
    plot.plot(range(nb_epoch), _epoch_metric_loss, label='metric')
    plot.plot(range(nb_epoch), _sed_loss[:, 0], label='er')
    plot.plot(range(nb_epoch), _sed_loss[:, 1], label='f1')
    plot.legend()
    plot.grid(True)
    plot.subplot(313)
    plot.plot(range(nb_epoch), _doa_loss[:, 1], label='gt_thres')
    plot.plot(range(nb_epoch), _doa_loss[:, 2], label='pred_thres')
    plot.legend()
    plot.grid(True)
    plot.savefig(fig_name)
    plot.close()


def main(argv):
    """
    Main wrapper for training sound event localization and detection network.
    :param argv: expects two optional inputs.
        first input: job_id - (optional) all the output files will be uniquely represented with this. (default) 1
        second input: task_id - (optional) To chose the system configuration in parameters.py.
        (default) uses default parameters
    """
    set_gpu()
    if len(argv) != 3:
        print('\n\n')
        print('-------------------------------------------------------------------------------------------------------')
        print('The code expected two inputs')
        print('\t>> python seld.py <job-id> <task-id>')
        print('\t\t<job-id> is a unique identifier which is used for output filenames (models, training plots). '
              'You can use any number or string for this.')
        print('\t\t<task-id> is used to choose the user-defined parameter set from parameter.py')
        print('Using default inputs for now')
        print('-------------------------------------------------------------------------------------------------------')
        print('\n\n')

    # use parameter set defined by user
    task_id = '1' if len(argv) < 3 else argv[-1]
    params = parameter.get_params(task_id)
    job_id = 1 if len(argv) < 2 else argv[1]
    model_dir = 'models/'
    utils.create_folder(model_dir)
    unique_name = '{}_ov{}_split{}_{}{}_3d{}_{}'.format(
        params['dataset'], params['overlap'], params['split'], params['mode'], params['weakness'],
        int(params['cnn_3d']), job_id
    )
    unique_name = os.path.join(model_dir, unique_name)
    print("unique_name: {}\n".format(unique_name))
    data_gen_train = cls_data_generator.DataGenerator(
        dataset=params['dataset'], ov=params['overlap'], split=params['split'], db=params['db'], nfft=params['nfft'],
        batch_size=params['batch_size'], seq_len=params['sequence_length'], classifier_mode=params['mode'],
        weakness=params['weakness'], datagen_mode='train', cnn3d=params['cnn_3d'], xyz_def_zero=params['xyz_def_zero'],
        azi_only=params['azi_only']
    )
    data_gen_test = cls_data_generator.DataGenerator(
        dataset=params['dataset'], ov=params['overlap'], split=params['split'], db=params['db'], nfft=params['nfft'],
        batch_size=params['batch_size'], seq_len=params['sequence_length'], classifier_mode=params['mode'],
        weakness=params['weakness'], datagen_mode='test', cnn3d=params['cnn_3d'], xyz_def_zero=params['xyz_def_zero'],
        azi_only=params['azi_only'], shuffle=False
    )
    data_in, data_out = data_gen_train.get_data_sizes()
    print(
        'FEATURES:\n'
        '\tdata_in: {}\n'
        '\tdata_out: {}\n'.format(
            data_in, data_out
        )
    )
    gt = collect_test_labels(data_gen_test, data_out, params['mode'], params['quick_test'])
    sed_gt = evaluation_metrics.reshape_3Dto2D(gt[0])
    doa_gt = evaluation_metrics.reshape_3Dto2D(gt[1])
    print(
        'MODEL:\n'
        '\tdropout_rate: {}\n'
        '\tCNN: nb_cnn_filt: {}, pool_size{}\n'
        '\trnn_size: {}, fnn_size: {}\n'.format(
            params['dropout_rate'],
            params['nb_cnn3d_filt'] if params['cnn_3d'] else params['nb_cnn2d_filt'], params['pool_size'],
            params['rnn_size'], params['fnn_size']
        )
    )

    # SELD-TCN MODEL
    print("DATA IN:" + str(data_in))
    model = keras_model.get_seldtcn_model(data_in=data_in, data_out=data_out, dropout_rate=params['dropout_rate'],
                                          nb_cnn2d_filt=params['nb_cnn2d_filt'], pool_size=params['pool_size'],
                                          fnn_size=params['fnn_size'], weights=params['loss_weights'])
    #'''
    best_metric = 99999
    conf_mat = None
    best_conf_mat = None
    best_epoch = -1
    patience_cnt = 0
    epoch_metric_loss = np.zeros(params['nb_epochs'])
    tr_loss = np.zeros(params['nb_epochs'])
    val_loss = np.zeros(params['nb_epochs'])
    doa_loss = np.zeros((params['nb_epochs'], 6))
    sed_loss = np.zeros((params['nb_epochs'], 2))
    nb_epoch = 2 if params['quick_test'] else params['nb_epochs']
    tot_time = 0

    for epoch_cnt in range(nb_epoch):
        start = time.time()
        hist = model.fit_generator(
            generator=data_gen_train.generate(),
            steps_per_epoch=2 if params['quick_test'] else data_gen_train.get_total_batches_in_data(),
            validation_data=data_gen_test.generate(),
            validation_steps=2 if params['quick_test'] else data_gen_test.get_total_batches_in_data(),
            epochs=1,
            verbose=0
        )
        tr_loss[epoch_cnt] = hist.history.get('loss')[-1]
        val_loss[epoch_cnt] = hist.history.get('val_loss')[-1]
        pred = model.predict_generator(
            generator=data_gen_test.generate(),
            steps=2 if params['quick_test'] else data_gen_test.get_total_batches_in_data(),
            verbose=2
        )
        if params['mode'] == 'regr':
            sed_pred = evaluation_metrics.reshape_3Dto2D(pred[0]) > 0.5
            doa_pred = evaluation_metrics.reshape_3Dto2D(pred[1])
            sed_loss[epoch_cnt, :] = evaluation_metrics.compute_sed_scores(sed_pred, sed_gt, data_gen_test.nb_frames_1s())
            if params['azi_only']:
                doa_loss[epoch_cnt, :], conf_mat = evaluation_metrics.compute_doa_scores_regr_xy(doa_pred, doa_gt,
                                                                                                 sed_pred, sed_gt)
            else:
                doa_loss[epoch_cnt, :], conf_mat = evaluation_metrics.compute_doa_scores_regr_xyz(doa_pred, doa_gt,
                                                                                                  sed_pred, sed_gt)
            epoch_metric_loss[epoch_cnt] = np.mean([
                sed_loss[epoch_cnt, 0],
                1 - sed_loss[epoch_cnt, 1],
                2 * np.arcsin(doa_loss[epoch_cnt, 1] / 2.0) / np.pi,
                1 - (doa_loss[epoch_cnt, 5] / float(doa_gt.shape[0]))]
            )
        plot_functions(unique_name, tr_loss, val_loss, sed_loss, doa_loss, epoch_metric_loss)
        patience_cnt += 1
        if epoch_metric_loss[epoch_cnt] < best_metric:
            best_metric = epoch_metric_loss[epoch_cnt]
            best_conf_mat = conf_mat
            best_epoch = epoch_cnt
            model.save('{}_model.h5'.format(unique_name))
            patience_cnt = 0
        print(
            'epoch_cnt: %d, time: %.2fs, tr_loss: %.2f, val_loss: %.2f, '
            'F1_overall: %.2f, ER_overall: %.2f, '
            'doa_error_gt: %.2f, doa_error_pred: %.2f, good_pks_ratio:%.2f, '
            'error_metric: %.2f, best_error_metric: %.2f, best_epoch : %d' %
            (
                epoch_cnt, time.time() - start, tr_loss[epoch_cnt], val_loss[epoch_cnt],
                sed_loss[epoch_cnt, 1], sed_loss[epoch_cnt, 0],
                doa_loss[epoch_cnt, 1], doa_loss[epoch_cnt, 2], doa_loss[epoch_cnt, 5] / float(sed_gt.shape[0]),
                epoch_metric_loss[epoch_cnt], best_metric, best_epoch
            )
        )
        if epoch_cnt in [2, 10, 20, 30, 40, 50, 60, 70, 80, 100, 120, 140, 150, 170, 190, 200, 250, 300, 450, 400]:
            print_metrics(best_conf_mat, best_epoch, best_metric, doa_loss, sed_gt, sed_loss)
        tot_time += (time.time() - start)
        print("Time elapsed: %.2f hrs\n" % (tot_time / 3600))
        if patience_cnt > params['patience']:
            break
    print_metrics(best_conf_mat, best_epoch, best_metric, doa_loss, sed_gt, sed_loss)


def print_metrics(best_conf_mat, best_epoch, best_metric, doa_loss, sed_gt, sed_loss):
    print('best_conf_mat : {}'.format(best_conf_mat))
    print('best_conf_mat_diag : {}'.format(np.diag(best_conf_mat)))
    print('saved model for the best_epoch: {} with best_metric: {}, '.format(best_epoch, best_metric))
    print('DOA Metrics: doa_loss_gt: {}, doa_loss_pred: {}, good_pks_ratio: {}'.format(
        doa_loss[best_epoch, 1], doa_loss[best_epoch, 2], doa_loss[best_epoch, 5] / float(sed_gt.shape[0])))
    print('SED Metrics: F1_overall: {}, ER_overall: {}'.format(sed_loss[best_epoch, 1], sed_loss[best_epoch, 0]))


if __name__ == "__main__":
    try:
        sys.exit(main(sys.argv))
    except (ValueError, IOError) as e:
        sys.exit(e)
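Not part of the original script, but a common mitigation when GPU memory blows up (a sketch, assuming TensorFlow 2.x) is to cap how much memory the process may allocate on the GPU, instead of or in addition to enabling memory growth, and to reduce params['batch_size']. The 4096 MB limit below is an arbitrary example value, not taken from the project.

import tensorflow as tf

# Sketch: must run before any op initializes the GPU.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])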

About the or-tools OnlyEnforceIf and AddBoolAnd methods

I have a Bool array and I want the ones to appear only in certain places, like this:
[1,1,1,1,0,0,0,0,0,0] or [0,0,0,0,0,0,1,1,1,1]. Here I want four ones to appear either at the head or at the tail of the array, just those two placements. How can I do this?
model = cp_model.CpModel()
solver = cp_model.CpSolver()
shifts = {}
ones = {}
sequence = []
for i in range(10):
    shifts[(i)] = model.NewIntVar(0, 10, "shifts(%i)" % i)
    ones[(i)] = model.NewBoolVar('%i' % i)
for i in range(10):
    model.Add(shifts[(i)] == 8).OnlyEnforceIf(ones[(i)])
    model.Add(shifts[(i)] == 0).OnlyEnforceIf(ones[(i)].Not())
# I want the four 8s in the array to appear only in the two positions at the head or tail of the array, and not anywhere else.
model.AddBoolAnd([ones[(0)], ones[(1)], ones[(2)], ones[(3)]])  # appear at head
# model.AddBoolAnd([ones[(6)], ones[(7)], ones[(8)], ones[(9)]])  # appear at tail, error!
model.Add(sum(ones[(i)] for i in range(10)) == 4)
status = solver.Solve(model)
print("status:", status)
res = []
for i in range(10):
    res.append(solver.Value(shifts[(i)]))
print(res)
Try AddAllowedAssignments:
model = cp_model.CpModel()
ones = [model.NewBoolVar("") for _ in range(10)]
model.AddAllowedAssignments(
    ones, [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]]
)
Edit: @Laurent's suggestion
model = cp_model.CpModel()
ones = [model.NewBoolVar("") for _ in range(10)]
head = model.NewBoolVar("head")
# HEAD
model.AddImplication(head, ones[0])
model.AddImplication(head, ones[1])
model.AddImplication(head, ones[2])
model.AddImplication(head, ones[3])
model.AddImplication(head, ones[4].Not())
model.AddImplication(head, ones[5].Not())
...
# TAIL
model.AddImplication(head.Not(), ones[0].Not())
model.AddImplication(head.Not(), ones[1].Not())
....
model.AddImplication(head.Not(), ones[6])
model.AddImplication(head.Not(), ones[7])
model.AddImplication(head.Not(), ones[8])
model.AddImplication(head.Not(), ones[9])
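The same idea can also be written with a loop instead of listing every implication by hand (a sketch of the same constraint, not taken from the original answer):

# Sketch: head == True forces ones[0..3] = 1 and the rest 0;
# head == False forces ones[6..9] = 1 and the rest 0.
for i in range(10):
    model.AddImplication(head, ones[i] if i < 4 else ones[i].Not())
    model.AddImplication(head.Not(), ones[i] if i >= 6 else ones[i].Not())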

Google OR-Tools doesn't find a solution for a VRPTW problem

I'm tackling a VRPTW problem and struggling because the solver finds no solution with any data except an artificially small instance.
The setting is as follows.
There are several depots and locations to visit. Each location has a time window. Each vehicle has a break time and a work time. Also, the locations have some constraints, and only the vehicles that satisfy the demand can visit them.
Based on this experimental setting, I wrote the code below.
As I said, it seems to work with small artificial data, but with real data it never finds a solution. I tried 5 different data sets.
Although I set a 7200 second time limit here, I previously ran it for longer than 10 hours and the result was the same.
The data's scale is 40~50 vehicles and 200~300 locations.
Does this code have a problem? If not, in what order should I change the approach (such as initialization, search method, and so on)?
(Edited to use integers for the time matrix)
from dataclasses import dataclass
from typing import List, Tuple
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2

# TODO: Refactor
BIG_ENOUGH = 100000000
TIME_DIMENSION = 'Time'
TIME_LIMIT = 7200


@dataclass
class DataSet:
    time_matrix: List[List[int]]
    locations_num: int
    vehicles_num: int
    vehicles_break_time_window: List[Tuple[int, int, int]]
    vehicles_work_time_windows: List[Tuple[int, int]]
    location_time_windows: List[Tuple[int, int]]
    vehicles_depots_indices: List[int]
    possible_vehicles: List[List[int]]


def execute(data: DataSet):
    manager = pywrapcp.RoutingIndexManager(data.locations_num,
                                           data.vehicles_num,
                                           data.vehicles_depots_indices,
                                           data.vehicles_depots_indices)
    routing_parameters = pywrapcp.DefaultRoutingModelParameters()
    routing_parameters.solver_parameters.trace_propagation = True
    routing_parameters.solver_parameters.trace_search = True
    routing = pywrapcp.RoutingModel(manager, routing_parameters)

    def time_callback(source_index, dest_index):
        from_node = manager.IndexToNode(source_index)
        to_node = manager.IndexToNode(dest_index)
        return data.time_matrix[from_node][to_node]

    transit_callback_index = routing.RegisterTransitCallback(time_callback)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    routing.AddDimension(
        transit_callback_index,
        BIG_ENOUGH,
        BIG_ENOUGH,
        False,
        TIME_DIMENSION)
    time_dimension = routing.GetDimensionOrDie(TIME_DIMENSION)

    # set time window for locations start time
    # set condition restrictions
    possible_vehicles = data.possible_vehicles
    for location_idx, time_window in enumerate(data.location_time_windows):
        index = manager.NodeToIndex(location_idx + data.vehicles_num)
        time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
        routing.SetAllowedVehiclesForIndex(possible_vehicles[location_idx], index)

    solver = routing.solver()
    for i in range(data.vehicles_num):
        routing.AddVariableMinimizedByFinalizer(
            time_dimension.CumulVar(routing.Start(i)))
        routing.AddVariableMinimizedByFinalizer(
            time_dimension.CumulVar(routing.End(i)))

    # set work time window for vehicles
    for vehicle_index, work_time_window in enumerate(data.vehicles_work_time_windows):
        start_index = routing.Start(vehicle_index)
        time_dimension.CumulVar(start_index).SetRange(work_time_window[0],
                                                      work_time_window[0])
        end_index = routing.End(vehicle_index)
        time_dimension.CumulVar(end_index).SetRange(work_time_window[1],
                                                    work_time_window[1])

    # set break time for vehicles
    node_visit_transit = {}
    for n in range(routing.Size()):
        if n >= data.locations_num:
            node_visit_transit[n] = 0
        else:
            node_visit_transit[n] = 1

    break_intervals = {}
    for v in range(data.vehicles_num):
        vehicle_break = data.vehicles_break_time_window[v]
        break_intervals[v] = [
            solver.FixedDurationIntervalVar(vehicle_break[0],
                                            vehicle_break[1],
                                            vehicle_break[2],
                                            True,
                                            'Break for vehicle {}'.format(v))
        ]
        time_dimension.SetBreakIntervalsOfVehicle(
            break_intervals[v], v, node_visit_transit
        )

    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    search_parameters.local_search_metaheuristic = (
        routing_enums_pb2.LocalSearchMetaheuristic.GREEDY_DESCENT)
    search_parameters.time_limit.seconds = TIME_LIMIT
    search_parameters.log_search = True
    solution = routing.SolveWithParameters(search_parameters)
    return solution


if __name__ == '__main__':
    data = DataSet(
        time_matrix=[[0, 0, 4, 5, 5, 6],
                     [0, 0, 6, 4, 5, 5],
                     [1, 3, 0, 6, 5, 4],
                     [2, 1, 6, 0, 5, 4],
                     [2, 2, 5, 5, 0, 6],
                     [3, 2, 4, 4, 6, 0]],
        locations_num=6,
        vehicles_num=2,
        vehicles_depots_indices=[0, 1],
        vehicles_work_time_windows=[(720, 1080), (720, 1080)],
        vehicles_break_time_window=[(720, 720, 15), (720, 720, 15)],
        location_time_windows=[(735, 750), (915, 930), (915, 930), (975, 990)],
        possible_vehicles=[[0], [1], [0], [1]]
    )
    solution = execute(data)
    if solution is not None:
        print("solution is found")

MyHDL: Can't translate Signal.intbv.max to VHDL

I'm new to Python and MyHDL, so I started by converting old VHDL projects to MyHDL. This project is a VGA timer that can accept any width, height, and frequency (given that they actually work with monitors). It doesn't successfully convert to either VHDL or Verilog because of the statements:
h_count.val.max # line 30
v_count.val.max # line 33
I can print their values just fine so they definitely evaluate to integers, but if I replace them with their literal values then it properly converts. I couldn't find anything about this in the myhdl issue tracker, but I don't want to add a false issue because of a newbie's mistake. Is there a proper way to use Signal.val.max or do I just avoid it? Here's the full code:
from myhdl import Signal, intbv, always_comb, always, toVHDL


def vga_timer(clk, x, y, h_sync, v_sync, vidon, width=800, height=600, frequency=72,
              left_buffer=0, right_buffer=0, top_buffer=0, bottom_buffer=0):
    # load vga constants by resolution
    resolution = (width, height, frequency)
    supported_resolutions = {(640, 480, 60): (16, 96, 48, 10, 2, 33, 0),
                             (800, 600, 60): (40, 128, 88, 1, 4, 23, 1),
                             (800, 600, 72): (56, 120, 64, 37, 6, 23, 1),
                             (1024, 768, 60): (24, 136, 160, 3, 6, 29, 0),
                             (1280, 720, 60): (72, 80, 216, 3, 5, 22, 1),
                             (1920, 1080, 60): (88, 44, 148, 4, 5, 36, 1)}
    assert resolution in supported_resolutions, "%ix%i @ %ifps not a supported resolution" % (width, height, frequency)
    screen_constants = supported_resolutions.get(resolution)

    # h for horizontal variables and signals, v for vertical constants and signals
    h_front_porch, h_sync_width, h_back_porch, v_front_porch, v_sync_width, v_back_porch, polarity = screen_constants
    h_count = Signal(intbv(0, 0, width + h_front_porch + h_sync_width + h_back_porch))
    v_count = Signal(intbv(0, 0, height + v_front_porch + v_sync_width + v_back_porch))
    print(h_count.val.max)
    print(v_count.val.max)

    @always(clk.posedge)
    def counters():
        h_count.next = h_count + 1
        v_count.next = v_count
        if h_count == 1040 - 1:  # h_count.val.max - 1:
            h_count.next = 0
            v_count.next = v_count + 1
            if v_count == 666 - 1:  # v_count.val.max - 1:
                v_count.next = 0

    # determines h_sync and v_sync
    @always_comb
    def sync_pulses():
        h_sync_left = width - left_buffer + h_front_porch
        h_sync_right = h_sync_left + h_sync_width
        h_sync.next = polarity
        if h_sync_left <= h_count and h_count < h_sync_right:
            h_sync.next = not polarity
        v_sync_left = height - top_buffer + v_front_porch
        v_sync_right = v_sync_left + v_sync_width
        v_sync.next = polarity
        if v_sync_left <= v_count and v_count < v_sync_right:
            v_sync.next = not polarity

    @always_comb
    def blanking():
        vidon.next = 0
        if h_count < width - left_buffer - right_buffer and v_count < height - top_buffer - bottom_buffer:
            vidon.next = 1

    @always_comb
    def x_y_adjust():
        # x and y are only used when vidon = 1. during this time x = h_count and y = v_count
        x.next = h_count[len(x.val):]
        y.next = v_count[len(y.val):]

    return counters, sync_pulses, blanking, x_y_adjust


width = 800
height = 600
frequency = 72
clk = Signal(bool(0))
x = Signal(intbv(0)[(width-1).bit_length():])
y = Signal(intbv(0)[(height-1).bit_length():])
h_sync = Signal(bool(0))
v_sync = Signal(bool(0))
vidon = Signal(bool(0))
vga_timer_inst = toVHDL(vga_timer, clk, x, y, h_sync, v_sync, vidon, width, height, frequency)
Any miscellaneous advice on my code is also welcome.
You may have found this out by now, but if you want convertible code, you can't use the signal qualities (min, max, number of bits, etc.) in the combinational or sequential blocks. You can use them in constant assignments outside these blocks, though. So if you put these instead of your print statements:
h_counter_max = h_count.val.max - 1
v_counter_max = v_count.val.max - 1
you can use h_counter_max and v_counter_max in your tests on lines 30 and 33.
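For example, the counters block from the question would then read roughly like this (a sketch combining the constants above with the original block, not tested):

h_counter_max = h_count.val.max - 1
v_counter_max = v_count.val.max - 1

@always(clk.posedge)
def counters():
    h_count.next = h_count + 1
    v_count.next = v_count
    if h_count == h_counter_max:  # instead of the literal 1040 - 1
        h_count.next = 0
        v_count.next = v_count + 1
        if v_count == v_counter_max:  # instead of the literal 666 - 1
            v_count.next = 0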
The min, max attributes can be used in the latest version.