I have installed a Buildbot master and slave, and I am running the slave after starting the master. Below is my master script for a build named simplebuild.
c = BuildmasterConfig = {}
c['status'] = []
from buildbot.status import html
from buildbot.status.web import authz, auth
authz_cfg=authz.Authz(
    auth=auth.BasicAuth([("slave1","slave1")]),
    gracefulShutdown = False,
    forceBuild = 'auth',
    forceAllBuilds = False,
    pingBuilder = False,
    stopBuild = False,
    stopAllBuilds = False,
    cancelPendingBuild = False,
)
c['status'].append(html.WebStatus(http_port=8010, authz=authz_cfg))
from buildbot.process.factory import BuildFactory
from buildbot.steps.source import SVN
from buildbot.steps.shell import ShellCommand
qmake = ShellCommand(name = "qmake",
                     command = ["qmake"],
                     haltOnFailure = True,
                     description = "qmake")
makeclean = ShellCommand(name = "make clean",
                         command = ["make", "clean"],
                         haltOnFailure = True,
                         description = "make clean")
checkout = SVN(baseURL = "file:///home/aguerofire/buildbottestsetup/codeRepo/",
               mode = "update",
               username = "pawan",
               password = "pawan",
               haltOnFailure = True)
makeall = ShellCommand(name = "make all",
                       command = ["make", "all"],
                       haltOnFailure = True,
                       description = "make all")
f_simplebuild = BuildFactory()
f_simplebuild.addStep(checkout)
f_simplebuild.addStep(qmake)
f_simplebuild.addStep(makeclean)
f_simplebuild.addStep(makeall)
from buildbot.buildslave import BuildSlave
c['slaves'] = [
    BuildSlave('slave1', 'slave1'),
]
c['slavePortnum'] = 13333
from buildbot.config import BuilderConfig
c['builders'] = [
    BuilderConfig(name = "simplebuild", slavenames = ['slave1'], factory = f_simplebuild)
]
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.changes import filter
trunkchanged = SingleBranchScheduler(name = "trunkchanged",
                                     change_filter = filter.ChangeFilter(branch = 'master'),
                                     treeStableTimer = 10,
                                     builderNames = ["simplebuild"])
c['schedulers'] = [ trunkchanged ]
from buildbot.changes.svnpoller import SVNPoller
svnpoller = SVNPoller(svnurl = "file:///home/aguerofire/buildbottestsetup/codeRepo/",
                      svnuser = "pawan",
                      svnpasswd = "pawan",
                      pollinterval = 20,
                      split_file = None)
c['change_source'] = svnpoller
After running this script, when I check the build status in the browser, I do not get any status for the build.
The detail inside the waterfall view is:
My first question: where is the actual build performed, at the master's end or the slave's end?
Second, what could be the problem with my Buildbot configuration? I made a deliberate error in a commit to find out whether it would show up in the waterfall display, but again there is no error, and the console view and waterfall view show the same screen.
Builds are run on a slave; the master just manages schedulers, builders, and slaves.
It seems that builds are not run. As for your second screenshot, it shows change info, but not build info. What does your "builders" tab show?
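One thing worth checking in the configuration above: with split_file = None, SVNPoller reports every change on branch None, while the scheduler's ChangeFilter only matches branch 'master', so no change ever triggers a build. A minimal sketch of a scheduler that matches the poller's default branch (everything else kept as in your config):
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.changes import filter

# With split_file=None the poller assigns all changes to branch None,
# so filter on branch=None instead of 'master'.
trunkchanged = SingleBranchScheduler(name = "trunkchanged",
                                     change_filter = filter.ChangeFilter(branch = None),
                                     treeStableTimer = 10,
                                     builderNames = ["simplebuild"])
c['schedulers'] = [ trunkchanged ]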
I'm trying to import a Linux VM Scale Set that was deployed in the Azure Portal from a custom shared image, also created in the portal. I'm using the following command:
terraform import module.vm_scaleset.azurerm_linux_virtual_machine_scale_set.vmscaleset /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myrg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1
Import fails with the following error:
Error: retrieving Virtual Machine Scale Set "vmss1" (Resource Group "myrg"): properties.virtualMachineProfile.osProfile was nil
Below is my VM Scale set module code
data "azurerm_lb" "loadbalancer" {
name = var.lbName
resource_group_name = var.rgName
}
data "azurerm_lb_backend_address_pool" "addresspool" {
loadbalancer_id = data.azurerm_lb.loadbalancer.id
name = var.lbAddressPool
}
data "azurerm_shared_image" "scaleset_image" {
provider = azurerm.ist
name = var.scaleset_image_name
gallery_name = var.scaleset_image_gallery
resource_group_name = var.scaleset_image_rgname
}
resource "azurerm_linux_virtual_machine_scale_set" "vmscaleset" {
name = var.vmssName
resource_group_name = var.rgName
location = var.location
sku = var.vms_sku
instances = var.vm_instances
admin_username = azurerm_key_vault_secret.vmssusername.value
admin_password = azurerm_key_vault_secret.vmsspassword.value
disable_password_authentication = false
zones = var.vmss_zones
source_image_id = data.azurerm_shared_image.scaleset_image.id
tags = module.vmss_tags.tags
os_disk {
storage_account_type = var.vmss_osdisk_storage
caching = "ReadWrite"
create_option = "FromImage"
}
data_disk {
storage_account_type = "StandardSSD_LRS"
caching = "None"
disk_size_gb = 1000
lun = 10
create_option = "FromImage"
}
network_interface {
name = format("nic-%s-001", var.vmssName)
primary = true
enable_accelerated_networking = true
ip_configuration {
name = "internal"
load_balancer_backend_address_pool_ids = [data.azurerm_lb_backend_address_pool.addresspool.id]
primary = true
subnet_id = var.subnet_id
}
}
lifecycle {
ignore_changes = [
tags
]
}
}
The source image was created from a Linux RHEL 8.6 VM that included a custom node.js script.
Examination of the Scale Set in the portal does indeed show that the virtualMachineProfile.osProfile is absent.
I haven't been able to find a solution on any forum. Is there any way to ignore the error and import the Scale Set anyway?
I am trying to run a code repository downloaded from GitHub, following its instructions, but I am getting the following error:
TypeError: __init__() missing 1 required positional argument: 'image_paths'
I am getting this error at code line 63 (preprocessing=preprocessing).
When I start the program in debug mode, it shows the following error:
unable to get repr for <class 'albumentations.core.composition.compose'>
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import torch
from skimage import io
from utils import adjust_sar_contrast, compute_building_score, plot_images
sys.path.append('/home/salman/Downloads/SpaceNet_SAR_Buildings_Solutions-master/4-motokimura/tmp/work')
from spacenet6_model.configs.load_config import get_config_with_previous_experiment
from spacenet6_model.datasets import SpaceNet6TestDataset
from spacenet6_model.models import get_model
from spacenet6_model.transforms import get_augmentation, get_preprocess
# select previous experiment to load
exp_id = 14
exp_log_dir = "/home/salman/Downloads/SpaceNet_SAR_Buildings_Solutions-master/4-motokimura/tmp/logs" # None: use default
# select device to which the model is loaded
cuda = True
if cuda:
    device = 'cuda'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
else:
    device = 'cpu'
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
# overwrite default config with previous experiment
config = get_config_with_previous_experiment(exp_id=exp_id, exp_log_dir=exp_log_dir)
# overwrite additional hyper parameters
config.MODEL.DEVICE = device
config.WEIGHT_ROOT = "/home/salman/Downloads/SpaceNet_SAR_Buildings_Solutions-master/4-motokimura/tmp/weights/"
config.MODEL.WEIGHT = f"/home/salman/Downloads/SpaceNet_SAR_Buildings_Solutions-master/4-motokimura/tmp/weights/exp_{exp_id:04d}/model_best.pth"
config.INPUT.MEAN_STD_DIR = "/home/salman/Downloads/SpaceNet_SAR_Buildings_Solutions-master/4-motokimura/tmp/work/models/image_mean_std/"
config.INPUT.TEST_IMAGE_DIR = "/home/salman/data/SN6_buildings_AOI_11_Rotterdam_test_public/test_public/AOI_11_Rotterdam/SAR-Intensity"
config.INPUT.SAR_ORIENTATION="/home/salman/Downloads/SpaceNet_SAR_Buildings_Solutions-master/4-motokimura/tmp/work/static/SAR_orientations.txt"
config.TRAIN_VAL_SPLIT_DIR="/home/salman/Downloads/data/spacenet6/split"
config.PREDICTION_ROOT="/home/salman/Downloads/data/spacenet6/predictions"
config.POLY_CSV_ROOT="/home/salman/Downloads/data/spacenet6/polygons"
config.CHECKPOINT_ROOT="/home/salman/Downloads/data/spacenet6/ceckpoints"
config.POLY_OUTPUT_PATH="/home/salman/Downloads/data/spacenet6/val_polygons"
config.freeze()
print(config)
model = get_model(config)
model.eval();
from glob import glob
image_paths = glob(os.path.join(config.INPUT.TEST_IMAGE_DIR, "*.tif"))
#print(image_paths)
preprocessing = get_preprocess(config, is_test=True)
augmentation = get_augmentation(config, is_train=False)
test_dataset = SpaceNet6TestDataset(
    config,
    augmentation=augmentation,
    preprocessing=preprocessing
)
test_dataset_vis = SpaceNet6TestDataset(
    config,
    augmentation=augmentation,
    preprocessing=None
)
channel_footprint = config.INPUT.CLASSES.index('building_footprint')
channel_boundary = config.INPUT.CLASSES.index('building_boundary')
score_thresh = 0.5
alpha = 1.0
start_index = 900
N = 20
for i in range(start_index, start_index + N):
    image_vis = test_dataset_vis[i]['image']
    image = test_dataset[i]['image']
    x_tensor = image.unsqueeze(0).to(config.MODEL.DEVICE)
    pr_score = model.module.predict(x_tensor)
    pr_score = pr_score.squeeze().cpu().numpy()
    pr_score_building = compute_building_score(
        pr_score[channel_footprint],
        pr_score[channel_boundary],
        alpha=alpha
    )
    pr_mask = pr_score_building > score_thresh
    rotated = test_dataset[i]['rotated']
    if rotated:
        image_vis = np.flipud(np.fliplr(image_vis))
        pr_mask = np.flipud(np.fliplr(pr_mask))
    plot_images(
        SAR_intensity_0=(adjust_sar_contrast(image_vis[:, :, 0]), 'gray'),
        building_mask_pr=(pr_mask, 'viridis')
    )
The function which this code calls is given below:
def get_spacenet6_preprocess(config, is_test):
    """
    """
    mean_path = os.path.join(
        config.INPUT.MEAN_STD_DIR,
        config.INPUT.IMAGE_TYPE,
        'mean.npy'
    )
    mean = np.load(mean_path)
    mean = mean[np.newaxis, np.newaxis, :]
    std_path = os.path.join(
        config.INPUT.MEAN_STD_DIR,
        config.INPUT.IMAGE_TYPE,
        'std.npy'
    )
    std = np.load(std_path)
    std = std[np.newaxis, np.newaxis, :]
    if is_test:
        to_tensor = albu.Lambda(
            image=functools.partial(_to_tensor)
        )
    else:
        to_tensor = albu.Lambda(
            image=functools.partial(_to_tensor),
            mask=functools.partial(_to_tensor)
        )
    preprocess = [
        albu.Lambda(
            image=functools.partial(
                _normalize_image,
                mean=mean,
                std=std
            )
        ),
        to_tensor,
    ]
    return albu.Compose(preprocess)
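For what it's worth, the TypeError suggests that SpaceNet6TestDataset also expects the list of image paths as a positional argument. A minimal sketch of the likely fix, assuming the signature is SpaceNet6TestDataset(config, image_paths, augmentation=..., preprocessing=...) (an assumption based on the error message, not on the repository's documentation):
from glob import glob
import os

# Hypothetical fix: pass the test image paths explicitly, since __init__()
# reports 'image_paths' as a required positional argument.
image_paths = glob(os.path.join(config.INPUT.TEST_IMAGE_DIR, "*.tif"))

test_dataset = SpaceNet6TestDataset(
    config,
    image_paths,                 # assumed required positional argument
    augmentation=augmentation,
    preprocessing=preprocessing
)
test_dataset_vis = SpaceNet6TestDataset(
    config,
    image_paths,
    augmentation=augmentation,
    preprocessing=None
)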
My code:
import pandas
import requests
from bs4 import BeautifulSoup
from pywebio.input import *
from pywebio.output import *
from time import sleep
from pywebio import start_server
print("News web application successfully started!")
def app():
    request = requests.get("https://news.google.com/topics/CAAqRggKIkBDQklTS2pvUVkyOTJhV1JmZEdWNGRGOXhkV1Z5ZVlJQkZRb0lMMjB2TURKcU56RVNDUzl0THpBeFkzQjVlU2dBUAE/sections/CAQqSggAKkYICiJAQ0JJU0tqb1FZMjkyYVdSZmRHVjRkRjl4ZFdWeWVZSUJGUW9JTDIwdk1ESnFOekVTQ1M5dEx6QXhZM0I1ZVNnQVAB?hl=en-US&gl=US&ceid=US%3Aen")
    content = BeautifulSoup(request.content, 'html.parser')
    find = content.find('div', class_='ajwQHc BL5WZb')
    #open('test.html', 'w').write(findstr.find)
    h3 = find.find_all('h3')
    time = find.find_all('time')
    link = find.find_all('article')#.find_all('a').get('href').replace('.', '')
    result = []
    #print('https://news.google.com' + link)
    #put_html('<table border="1" width="100%" cellpadding="5">')
    source = find.find_all('a', {'data-n-tid' : '9'})
    time = find.find_all('time')
    textoutput = []
    textoutput1 = []
    textoutput2 = []
    takeoutput3 = []
    #writer = open('news.csv', 'w')
    #writer1 = csv.writer(writer, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for text in h3:
        a1 = text.text
        textoutput.append(a1)
    for text2 in source:
        a3 = text2.text
        textoutput1.append(a3)
    for text1 in time:
        a5 = text1.text
        textoutput2.append(a5)
    #for result in link:
    #    alinks = result.find_all('a')
    #    alinks1 = []
    #    for alinks1 in alinks:
    #        alinks2 = alinks1.get('href')
    #        alinksreplace = str(alinks2)
    #        alinksreplace1 = alinksreplace.replace(".", "")
    #        alinksreplace2 = alinksreplace1.replace("None", "")
    #
    #        if (alinksreplace2 != ''):
    #            if "publications" not in alinksreplace2:
    #                a = "https://news.google.com" + alinksreplace2
    #                takeoutput3.append(a)
    pandas.set_option('display.max_colwidth', None)
    frame = {'Title':textoutput, 'Time':textoutput2, 'Source' : textoutput1}
    frame1 = pandas.DataFrame(frame)
    #frame2 = {'Link' : takeoutput3}
    #frame3 = pandas.DataFrame(frame2)
    frametostring = frame1.to_string(index=False)
    #frametostring1 = frame3.to_string(index=False)
    #print(frametostring)
    put_code(frametostring)
    #put_code(frametostring1)
    #writer1.writerow([textoutput, textoutput1, textoutput2])

start_server(app, 82)
It worked fine, but today Google updated something and now it doesn't work, because Google added a "Before you continue" consent pop-up. How can I bypass this so that my script continues to work?
If it were Selenium I would just click the button, but here I don't know what to do.
Thanks for helping!
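One approach that has worked for similar Google News scrapers is to send a consent cookie with the request so that the "Before you continue" interstitial is skipped. A rough sketch, assuming the CONSENT cookie is still honoured for your region (the cookie value is an assumption, not something Google documents):
import requests
from bs4 import BeautifulSoup

url = "https://news.google.com/topics/CAAqRggKIkBDQklTS2pvUVkyOTJhV1JmZEdWNGRGOXhkV1Z5ZVlJQkZRb0lMMjB2TURKcU56RVNDUzl0THpBeFkzQjVlU2dBUAE/sections/CAQqSggAKkYICiJAQ0JJU0tqb1FZMjkyYVdSZmRHVjRkRjl4ZFdWeWVZSUJGUW9JTDIwdk1ESnFOekVTQ1M5dEx6QXhZM0I1ZVNnQVAB?hl=en-US&gl=US&ceid=US%3Aen"

# Hypothetical workaround: pre-set a consent cookie so Google does not
# redirect the request to the "Before you continue" page.
cookies = {"CONSENT": "YES+cb.20210720-07-p0.en+FX+410"}
headers = {"User-Agent": "Mozilla/5.0"}

request = requests.get(url, cookies=cookies, headers=headers)
content = BeautifulSoup(request.content, "html.parser")
# Check whether the news section is reachable again instead of the consent page.
print(content.find('div', class_='ajwQHc BL5WZb') is not None)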
Is there a way to reload code without restarting Zope in production?
New features are implemented roughly every two days and have to be uploaded to the server. Currently the only way this works is by restarting the ZEO server and all instances. I can't use plone.reload, as it only works in the development environment when debug mode is on. Below is the buildout.cfg content:
[buildout]
parts =
#   instance
    zeo
    client1
    client2
    client3
    zopepy
    zopeskel
    test
#   mysql
#   varnish-build
#   varnish
    supervisor
    pidproxy
extends =
    https://dist.plone.org/versions/zope-2-13-19-versions.cfg
find-links =
    https://dist.plone.org/release/4.2.4
    https://dist.plone.org/thirdparty
extensions =
    mr.developer
#   buildout.dumppickedversions
sources = sources
versions = versions
develop =
[versions]
plone.recipe.zeoserver = 1.3.1
plone.recipe.zope2instance = 4.2.8
five.localsitemanager = 2.0.5
Products.PluginRegistry = 1.3
Products.CMFCore = 2.2.7
Products.GenericSetup = 1.7.3
Products.ZSQLMethods = 2.13.4
zope.interface = 3.6.7
zope.app.publication = 3.12.0
#setuptools = 17.1.1
funcsigs = 0.4
openpyxl = 2.4.0
plone.reload = 2.0.2
[zeo]
recipe = plone.recipe.zeoserver
zeo-address = 127.0.0.1:9100
zeo-var = ${buildout:directory}/var
blob-storage = ${zeo:zeo-var}/blobstorage
#ggs = plone.app.blob
[client1]
recipe = plone.recipe.zope2instance
http-address = 9081
zeo-client = on
zeo-address = ${zeo:zeo-address}
shared-blob = on
blob-storage = ${zeo:zeo-var}/blobstorage
user = admin:Slick_RP#21!
products = ${buildout:directory}/matrix_git/prod/
debug-mode = off
verbose-security = off
eggs =
#   pillow
    mysql-python
    simplejson
    haversine
    openpyxl
    requests
    httpagentparser
    ordereddict
    python-memcached
#   python-crontab
#   setuptools
    Products.CMFCore
    Products.ZMySQLDA
#   Products.SQLAlchemyDA
    Products.PluggableAuthService
#   Products.ZopeProfiler
#   Products.MemoryProfiler
#   reportlab
    Products.BeakerSessionDataManager
    collective.fsexternalmethod
    plone.reload
zope-conf-additional =
    extensions ${buildout:directory}/matrix_git/Extensions
    <product-config beaker>
        session.type file
        session.data_dir ${buildout:directory}/var/sessions/data
        session.lock_dir ${buildout:directory}/var/sessions/lock
        session.key beaker.session
        session.secret secret
    </product-config>
zcml =
    collective.fsexternalmethod
    plone.reload
event-log-max-size = 5 MB
event-log-old-files = 5
access-log-max-size = 20 MB
access-log-old-files = 10
[client2]
recipe = plone.recipe.zope2instance
http-address = 9082
zeo-client = ${client1:zeo-client}
zeo-address = ${client1:zeo-address}
blob-storage = ${client1:blob-storage}
shared-blob = ${client1:shared-blob}
user = ${client1:user}
products = ${client1:products}
debug-mode = off
verbose-security = off
eggs = ${client1:eggs}
zcml = ${client1:zcml}
zope-conf-additional = ${client1:zope-conf-additional}
event-log-max-size = ${client1:event-log-max-size}
event-log-old-files = ${client1:event-log-old-files}
access-log-max-size = ${client1:access-log-max-size}
access-log-old-files = ${client1:access-log-old-files}
[client3]
recipe = plone.recipe.zope2instance
http-address = 9083
zeo-client = ${client1:zeo-client}
zeo-address = ${client1:zeo-address}
blob-storage = ${client1:blob-storage}
shared-blob = ${client1:shared-blob}
user = ${client1:user}
products = ${client1:products}
debug-mode = off
verbose-security = off
eggs = ${client1:eggs}
zcml = ${client1:zcml}
zope-conf-additional = ${client1:zope-conf-additional}
event-log-max-size = ${client1:event-log-max-size}
event-log-old-files = ${client1:event-log-old-files}
access-log-max-size = ${client1:access-log-max-size}
access-log-old-files = ${client1:access-log-old-files}
[zopepy]
recipe = zc.recipe.egg
eggs = ${client1:eggs}
interpreter = zopepy
scripts = zopepy
[test]
recipe = zc.recipe.testrunner
defaults = ['--auto-color', '--auto-progress']
eggs =
    ${client1:eggs}
[zopeskel]
recipe = zc.recipe.egg
eggs =
    ZopeSkel
    PasteScript
[mysql]
recipe = zest.recipe.mysql
# Note that these urls usually stop working after a while... thanks...
mysql-url = http://downloads.mysql.com/archives/mysql-5.0/mysql-5.0.86.tar.gz
mysql-python-url = http://pypi.python.org/packages/source/M/MySQL-python/MySQL-python-1.2.3.tar.gz
[varnish-build]
recipe = zc.recipe.cmmi
url = ${varnish:download-url}
[varnish]
recipe = plone.recipe.varnish
daemon = ${buildout:parts-directory}/varnish-build/sbin/varnishd
bind = 127.0.0.1:8000
backends = 127.0.0.1:8080
cache-size = 50M
[pidproxy]
recipe = zc.recipe.egg
eggs = supervisor
scripts = pidproxy
[supervisor]
recipe = collective.recipe.supervisor
port = 127.0.0.1:24007
serverurl = http://127.0.0.1:24007
programs =
#   10 mysql ${buildout:directory}/bin/pidproxy [${buildout:directory}/var/mysql/mysql.pid ${buildout:directory}/parts/mysql/install/bin/mysqld_safe --pid-file=${buildout:directory}/var/mysql/mysql.pid --socket=${buildout:directory}/var/mysql.socket] ${buildout:directory} true
    20 zeo ${buildout:directory}/bin/zeo [console] ${buildout:directory} true
    30 client1 ${buildout:directory}/bin/client1 [console] ${buildout:directory} true
    40 client2 ${buildout:directory}/bin/client2 [console] ${buildout:directory} true
    50 client3 ${buildout:directory}/bin/client3 [console] ${buildout:directory} true
If you are deploying this frequently, you can deploy at low-traffic times (e.g. at night).
If the website must always be up, you could have two sets of Plone instances: one set active and serving requests, the other inactive.
When updating, the offline servers are updated first, and once they are done, a switch is flipped (in HAProxy, for example) to make them the active servers.
You could even keep all servers available at all times and, when updating, take some offline while they are updated.
As others (and you yourself) have pointed out, I would never use plone.reload or similar development tools in production.
Yes, there is a way, although I'd never do this in production; it's a great time-saver when developing. You can do the reload within a browser view:
from plone.reload.code import reload_code
from Products.Five.browser import BrowserView

class View(BrowserView):

    def __call__(self):
        reload_code()
        return 'Code loaded.'
Then call the view on the site with the name you registered it under. This even works in non-debug mode while the instance is running in the background. Tested with a standalone instance (non-ZEO).
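For example, assuming the view above was registered under the hypothetical name @@reload-code on a site called Plone served on port 9081 (these names, the port, and the credentials are all assumptions; substitute your own), it could be triggered from a deployment script:
import requests

# Hypothetical usage: hit the reload view after copying new code to the server.
# Site id, view name, port, and credentials below are placeholders.
resp = requests.get(
    "http://localhost:9081/Plone/@@reload-code",
    auth=("admin", "admin-password"),
)
print(resp.status_code, resp.text)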
I'm trying to use Buildbot for CI. I have set up a buildmaster and a buildslave, and they are both connected. (I'm attaching my master.cfg below.)
I have the following problems:
a) I can see the changes committed on the Waterfall page, which means SVNPoller is working fine. However, none of the changes are getting built. I get a "?" on the buildbot page.
b) When I try to do a force build from http://localhost:8010/builders, I get an error in the logs:
[HTTPChannel,1,10.0.0.58] ..but not authorized
c = BuildmasterConfig = {}
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("example-slave", "pass")]
c['slavePortnum'] = 9989
from buildbot.changes.svnpoller import SVNPoller
c['change_source'] = []
c['change_source'].append(SVNPoller(
    'file:///my/repo/path/trunk',
    pollinterval=300))
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.changes import filter
c['schedulers'] = []
c['schedulers'].append(SingleBranchScheduler(
    name="all",
    change_filter=filter.ChangeFilter(branch='trunk'),
    treeStableTimer=None,
    builderNames=["runtests"]))
c['schedulers'].append(ForceScheduler(
    name="force",
    builderNames=["runtests"]))
from buildbot.process.factory import BuildFactory
from buildbot.steps.source import Git
from buildbot.steps.source import SVN
from buildbot.steps.shell import ShellCommand
from buildbot.steps import source, shell
from buildbot.process import factory
f = factory.BuildFactory()
f.addStep(source.SVN(svnurl="file:///my/repo/path/trunk/", mode="copy"))
f.addStep(shell.ShellCommand(command=["cmake", "."]))
f.addStep(shell.ShellCommand(command=["make", "all"]))
from buildbot.config import BuilderConfig
c['builders'] = []
c['builders'].append(
    BuilderConfig(name="runtests",
                  slavenames=["example-slave"],
                  factory=f))
c['status'] = []
from buildbot.status import html
from buildbot.status.web import authz, auth
authz_cfg=authz.Authz(
    auth=auth.BasicAuth([("userid","password")]),
    gracefulShutdown = False,
    forceBuild = 'auth', # use this to test your slave once it is set up
    forceAllBuilds = True,
    pingBuilder = False,
    stopBuild = False,
    stopAllBuilds = False,
    cancelPendingBuild = False,
)
c['status'].append(html.WebStatus(http_port=8010, authz=authz_cfg))
c['title'] = "My Project"
c['titleURL'] = "http://my/url"
c['buildbotURL'] = "http://localhost:8010/"
c['db'] = {
    'db_url' : "sqlite:///state.sqlite",
}
Change the following value:
forceBuild = True
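In context, the authz block would then look roughly like this (a sketch based on the config above; with forceBuild = True anyone can trigger a force build from the web UI, whereas 'auth' requires logging in with the BasicAuth credentials first):
authz_cfg = authz.Authz(
    auth=auth.BasicAuth([("userid", "password")]),
    gracefulShutdown = False,
    forceBuild = True,        # was 'auth': force builds no longer require logging in
    forceAllBuilds = True,
    pingBuilder = False,
    stopBuild = False,
    stopAllBuilds = False,
    cancelPendingBuild = False,
)
c['status'].append(html.WebStatus(http_port=8010, authz=authz_cfg))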