Getting Fatal error: Cannot redeclare Aws\constantly() - SugarCRM

**I am using a scheduler in SugarCRM. After adding some files I am getting the error below.**
I got this error but have no idea where I made a mistake. It would be really great if you could help me.
**Fatal error: Cannot redeclare Aws\constantly() (previously declared in /var/www/html/axcessio/aws_new/Aws/functions.php:19) in /var/www/html/microservices/vendor/aws/aws-sdk-php/src/functions.php on line 22**
class amiCleanup_schedulerScheduler
{
    public function check_daily_reminder()
    {
        $GLOBALS['log']->error("Ami Clean Up scheduler =*= running");
        global $db;
        global $current_user;
        $queryTime = "SELECT NOW() nowtime, CURDATE() nowdate, CURTIME() nowti";
        $SchdlrTime = $db->query($queryTime);
        $rowTime = $db->fetchByAssoc($SchdlrTime);
        $current_dt = $rowTime['nowtime'];
        $GLOBALS['log']->error("Ami Clean Up scheduler =*= current_dt :: $current_dt");
        $top_query = "select a.id as job_his_id, a.name as job_his, a.backup_delete_date_c as delete_date, a.backup_id_c as backup_id, b.name as jd_name, c.name as h_name, c.host_type_c as h_type, c.aws_region_c as aws_region from phs01_jobs_history a, phs01_jobs_details b, mhs01_hosts c, phs01_jobs_details_phs01_jobs_history_1_c d, mhs01_hosts_phs01_jobs_details_1_c e where d.phs01_jobs_details_phs01_jobs_history_1phs01_jobs_details_ida = b.id and d.phs01_jobs_details_phs01_jobs_history_1phs01_jobs_history_idb = a.id and e.mhs01_hosts_phs01_jobs_details_1phs01_jobs_details_idb = b.id and e.mhs01_hosts_phs01_jobs_details_1mhs01_hosts_ida = c.id and a.delete_ami_c = '0' and a.backup_id_c IS NOT NULL";
        $top_query_run = $db->query($top_query);
        $top_query_count = $db->getAffectedRowCount($top_query_run);
        $GLOBALS['log']->error("Ami Clean Up scheduler =*= top_query_count :: $top_query_count");
        while ($query_list = $db->fetchByAssoc($top_query_run)) {
            $job_his_id = $query_list['job_his_id'];
            $job_his = $query_list['job_his'];
            $delete_date = $query_list['delete_date'];
            $backup_id = $query_list['backup_id'];
            $aws_region = $query_list['aws_region'];
            $api_key = $query_list['api_key'];
            $secret_key = $query_list['secret_key'];
            $h_type = $query_list['h_type'];
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= job_his_id :: $job_his_id");
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= job_his :: $job_his");
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= delete_date :: $delete_date");
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= backup_id :: $backup_id");
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= h_type :: $h_type");
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= aws_region :: $aws_region");
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= api_key :: $api_key");
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= secret_key :: $secret_key");
            if ($h_type == 'RDS') {
                $GLOBALS['log']->error("Ami Clean Up scheduler =*= inside if loop");
                $delete = $this->delete_RDS($backup_id, $job_his_id, $aws_region, $api_key, $secret_key);
                $GLOBALS['log']->error("Ami Clean Up scheduler =*= delete :: $delete");
                $update = "update phs01_jobs_history set delete_backup_c = '0' where id = '" . $delete . "'";
                $update_run = $db->query($update);
            }
            if ($h_type == 'EC2') {
                $GLOBALS['log']->error("Ami Clean Up scheduler =*= inside if loop");
                $delete = $this->delete_EC2($backup_id, $job_his_id, $aws_region, $api_key, $secret_key);
                $GLOBALS['log']->error("Ami Clean Up scheduler =*= delete :: $delete");
                $update = "update phs01_jobs_history set delete_backup_c = '0', delete_ami_c = '1' where id = '" . $delete . "'";
                $update_run = $db->query($update);
            }
        }
    }

    public function delete_RDS($backup_id, $job_his_id, $aws_region, $api_key, $secret_key)
    {
        $GLOBALS['log']->error("Ami Clean Up scheduler =*= inside delete_RDS backup_id $backup_id");
        $RdsClient = RdsClient::factory(array(
            'credentials.cache' => $cacheAdapter,
            'version' => '2014-10-31',
            'region' => 'us-west-1',
        ));
        if (!empty($backup_id)) {
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= result :: $RdsClient");
            $result = json_encode($RdsClient);
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= result :: $result");
            try {
                $result = $RdsClient->deleteDBSnapshot(array(
                    'DBSnapshotIdentifier' => $backup_id, // REQUIRED
                ));
            } catch (Exception $e) {
                $GLOBALS['log']->error("Ami Clean Up scheduler =*= error in delete snapshot caught in catch block");
            }
            $GLOBALS['log']->error("Ami Clean Up scheduler =*= result :: $result");
        }
        return $job_his_id;
    }

    function delete_EC2($backup_id, $job_his_id, $aws_region, $api_key, $secret_key)
    {
        $GLOBALS['log']->error("Ami Clean Up scheduler =*= inside delete_EC2 backup_id $backup_id");
        $EC2client = EC2client::factory(array(
            'credentials.cache' => $cacheAdapter,
            'version' => '2016-11-15',
            'region' => 'us-west-1',
        ));
        $result1 = $EC2client->describeImages([
            'DryRun' => false,
            //'ExecutableUsers' => ['<string>', ...],
            'ImageIds' => [$backup_id]
        ]);
        $SnapshotId = $result1['Images'][0]['BlockDeviceMappings'][0]['Ebs']['SnapshotId'];
        $GLOBALS['log']->error("Ami Clean Up scheduler =*= SnapshotId = $SnapshotId");
        $GLOBALS['log']->error("Ami Clean Up scheduler =*= EC2client = $EC2client");
        $GLOBALS['log']->error("Ami Clean Up scheduler =*= describeImages result1 :: $result1");
        if (!empty($SnapshotId)) {
            try {
                $result2 = $EC2client->deregisterImage(array(
                    'DryRun' => false,
                    'ImageId' => $backup_id, // REQUIRED
                ));
            } catch (Exception $e) {
                $GLOBALS['log']->error("Ami Clean Up scheduler =*= error in deregister image caught in catch block");
            }
            $GLOBALS['log']->error("Delete Backup scheduler =*= result for deregisterImage result2 :: $result2");
            try {
                $result3 = $EC2client->deleteSnapshot([
                    'DryRun' => false,
                    'SnapshotId' => $SnapshotId, // REQUIRED
                ]);
            } catch (Exception $e) {
                $GLOBALS['log']->error("Ami Clean Up scheduler =*= error in deleteSnapshot caught in catch block");
            }
            $GLOBALS['log']->error("Delete Backup scheduler =*= result for deleteSnapshot result3 :: $result3");
        }
        return $job_his_id;
    }
}
?>

This error occurs when the file 'vendor/aws/aws-sdk-php/src/functions.php' is included twice. In your case two copies of the SDK are being loaded: one from /var/www/html/axcessio/aws_new/Aws/functions.php and one from the Composer vendor directory under /var/www/html/microservices. Use include_once/require_once instead of include/require, or, even better, load the SDK through a single vendor/autoload.php.
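For example, a minimal sketch of the autoloader approach (assuming the SDK is currently being pulled in from your custom scheduler file; the vendor path is the one from the error message, and the file location in the comment is hypothetical):
<?php
// custom scheduler entry file (hypothetical location)
// Load the AWS SDK through Composer's autoloader exactly once, and remove any
// direct include/require of /var/www/html/axcessio/aws_new/Aws/functions.php.
require_once '/var/www/html/microservices/vendor/autoload.php';

use Aws\Rds\RdsClient;
use Aws\Ec2\Ec2Client;
Loading everything through a single vendor/autoload.php guarantees each SDK file is declared only once, even if several customizations need the SDK.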

Related

FastAPI Pytest: why does the client return 422?

I'm trying to test my POST route. It works in my project; I have problems only with pytest.
main.py:
@app.post('/create_service', status_code=status.HTTP_201_CREATED)
async def post_service(
    response: Response, service: CreateService, db: Session = Depends(get_db)
):
    service_model = models.Service()
    service_name = service.name
    service_instance = db.query(models.Service).filter(
        models.Service.name == service_name
    ).first()
    if service_instance is None:
        service_model.name = service_name
        db.add(service_model)
        db.commit()
    serviceversion_model = models.ServiceVersion()
    service_instance = db.query(models.Service).filter(
        models.Service.name == service_name
    ).first()
    serviceversion_instance = db.query(models.ServiceVersion).filter(
        models.ServiceVersion.service_id == service_instance.id
    ).filter(models.ServiceVersion.version == service.version).first()
    if serviceversion_instance:
        raise HTTPException(
            status_code=400, detail='Version of service already exists'
        )
    serviceversion_model.version = service.version
    serviceversion_model.is_used = service.is_used
    serviceversion_model.service_id = service_instance.id
    db.add(serviceversion_model)
    db.commit()
    db.refresh(serviceversion_model)
    service_dict = service.dict()
    for key in list(service_dict):
        if isinstance(service_dict[key], list):
            sub_dicts = service_dict[key]
            if not sub_dicts:
                response.status_code = status.HTTP_400_BAD_REQUEST
                return HTTPException(status_code=400, detail='No keys in config')
            servicekey_models = []
            for i in range(len(sub_dicts)):
                servicekey_model = models.ServiceKey()
                servicekey_models.append(servicekey_model)
                servicekey_models[i].service_id = service_instance.id
                servicekey_models[i].version_id = serviceversion_model.id
                servicekey_models[i].service_key = sub_dicts[i].get('service_key')
                servicekey_models[i].service_value = sub_dicts[i].get('service_value')
                db.add(servicekey_models[i])
            db.commit()
    return 'created'
test_main.py:
def test_create_service(client, db: Session = Depends(get_db)):
    key = Key(service_key='testkey1', service_value='testvalue1')
    service = CreateService(
        name="testname1",
        version="testversion1",
        is_used=True,
        keys=[key, ]
    )
    response = client.post("/create_service", params={"service": service.dict()})
    assert response.status_code == 200
I tried to post the service as JSON, as a CreateService instance, and finally as a params dictionary. Only with the last one do I get no error at the response line, but I get a 422 response status code. What is wrong?
If it helps:
schemas.py:
class Key(BaseModel):
    service_key: str
    service_value: str


class CreateService(BaseModel):
    name: str
    version: str
    is_used: bool
    keys: list[Key]

    class Config:
        orm_mode = True
Don't use params. Use json:
response = client.post("/create_service", json=service.dict())
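With that change the test from the question becomes roughly the following (a sketch: it assumes client is a TestClient fixture from your conftest.py, drops the Depends(get_db) parameter because Depends is only resolved inside FastAPI routes, and asserts 201 since the route declares status_code=status.HTTP_201_CREATED):
def test_create_service(client):
    key = Key(service_key='testkey1', service_value='testvalue1')
    service = CreateService(
        name="testname1",
        version="testversion1",
        is_used=True,
        keys=[key],
    )
    # Send the payload as the JSON request body so FastAPI validates it
    # against the CreateService model instead of expecting query parameters.
    response = client.post("/create_service", json=service.dict())
    assert response.status_code == 201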

Airflow - How to push XCom from the ECS operator?

In my Airflow DAG, I have an ECSOperator task followed by a PythonOperator task. I want to push some messages from the ECS task to the Python task using Airflow's XCom feature. I tried the option do_xcom_push=True with no result. Find a sample DAG below.
dag = DAG(
    dag_name, default_args=default_args, schedule_interval=None)

start = DummyOperator(task_id='start', dag=dag)
end = DummyOperator(task_id='end', dag=dag)

ecs_operator_args = {
    'launch_type': 'FARGATE',
    'task_definition': 'task-def:2',
    'cluster': 'cluster-name',
    'region_name': 'region',
    'network_configuration': {
        'awsvpcConfiguration':
            {}
    }
}

ecs_task = ECSOperator(
    task_id='x_com_test',
    **ecs_operator_args,
    do_xcom_push=True,
    params={'my_param': 'Parameter-1'},
    dag=dag)

def pull_function(**kwargs):
    ti = kwargs['ti']
    msg = ti.xcom_pull(task_ids='x_com_test', key='the_message')
    print("received message: '%s'" % msg)

pull_task = PythonOperator(
    task_id='pull_task',
    python_callable=pull_function,
    provide_context=True,
    dag=dag)

start >> ecs_task >> pull_task >> end
You need to set up a CloudWatch log group for the container.
ECSOperator needs to be extended to support pushing to XCom:
from collections import deque
from airflow.utils import apply_defaults
from airflow.contrib.operators.ecs_operator import ECSOperator

class MyECSOperator(ECSOperator):
    @apply_defaults
    def __init__(self, xcom_push=False, **kwargs):
        super(MyECSOperator, self).__init__(**kwargs)
        self.xcom_push_flag = xcom_push

    def execute(self, context):
        super().execute(context)
        if self.xcom_push_flag:
            return self._last_log_event()

    def _last_log_event(self):
        if self.awslogs_group and self.awslogs_stream_prefix:
            task_id = self.arn.split("/")[-1]
            stream_name = "{}/{}".format(self.awslogs_stream_prefix, task_id)
            events = self.get_logs_hook().get_log_events(self.awslogs_group, stream_name)
            last_event = deque(events, maxlen=1).pop()
            return last_event["message"]
dag = DAG(
    dag_name, default_args=default_args, schedule_interval=None)

start = DummyOperator(task_id='start', dag=dag)
end = DummyOperator(task_id='end', dag=dag)

ecs_operator_args = {
    'launch_type': 'FARGATE',
    'task_definition': 'task-def:2',
    'cluster': 'cluster-name',
    'region_name': 'region',
    'awslogs_group': '/aws/ecs/myLogGroup',
    'awslogs_stream_prefix': 'myStreamPrefix',
    'network_configuration': {
        'awsvpcConfiguration':
            {}
    }
}

ecs_task = MyECSOperator(
    task_id='x_com_test',
    **ecs_operator_args,
    xcom_push=True,
    params={'my_param': 'Parameter-1'},
    dag=dag)

def pull_function(**kwargs):
    ti = kwargs['ti']
    msg = ti.xcom_pull(task_ids='x_com_test', key='return_value')
    print("received message: '%s'" % msg)

pull_task = PythonOperator(
    task_id='pull_task',
    python_callable=pull_function,
    provide_context=True,
    dag=dag)

start >> ecs_task >> pull_task >> end
ecs_task will take the last event from the log group before it finishes executing and push it to XCom.
The Apache Airflow Amazon provider has a new commit that pretty much implements what @Бојан-Аџиевски described above, so you don't need to write a custom ECSOperator. It is available as of provider version 1.1.0.
All you have to do is pass do_xcom_push=True when calling the ECSOperator and provide the correct awslogs_group and awslogs_stream_prefix.
Make sure your awslogs_stream_prefix follows the format:
prefix-name/container-name
as this is what ECS directs logs to.
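A minimal sketch of that setup (the import path assumes the apache-airflow-providers-amazon package; the cluster, task definition, log group, and stream prefix are the placeholder values used earlier in this question, and my-container-name stands in for your container's name):
from airflow.providers.amazon.aws.operators.ecs import ECSOperator

ecs_task = ECSOperator(
    task_id='x_com_test',
    launch_type='FARGATE',
    task_definition='task-def:2',
    cluster='cluster-name',
    overrides={},
    # With logs configured, the operator returns the container's last CloudWatch
    # log line, so do_xcom_push=True makes it available under key 'return_value'.
    awslogs_group='/aws/ecs/myLogGroup',
    awslogs_stream_prefix='myStreamPrefix/my-container-name',  # prefix-name/container-name
    do_xcom_push=True,
    dag=dag,
)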

Quartz job getting triggered at wrong times

Issue Description:
We have a Quartz job which is scheduled to run at different times during the day, say 1 PM, 4 PM, and 7 PM. The issue is that the job is also getting triggered and executed at times other than the scheduled ones.
What we did:
We have already tried shutting down our servers (JBoss) completely and clearing the Quartz tables, but that hasn't worked at all.
We are using Quartz 1.6 and I want to know whether there is any bug in this version and whether a Quartz upgrade can resolve the issue, OR
whether there is any problem with how the Quartz properties are set up and whether they can be tweaked to resolve this.
Edit - more details below:
I have corrected the job timings now. The job is getting triggered at random times other than the ones mentioned in its schedule; there is no pattern to when it fires apart from the scheduled times.
Below are the job and trigger details from the DB properties file. Based on these, the job and trigger details are set up in the Quartz_Triggers and Quartz_CronTrigger tables in the DB:
<job>
    <job-detail>
        <name>Match Job</name>
        <group>JB_QUARTZ</group>
        <job-class>com.qd.qehadmin.common.scheduler.MatchJob</job-class>
        <volatility>false</volatility>
        <durability>true</durability>
        <recover>true</recover>
    </job-detail>
    <trigger>
        <cron>
            <name>Match Job Trigger</name>
            <group>JB_QUARTZ</group>
            <job-name>match Job</job-name>
            <job-group>JB_QUARTZ</job-group>
            <cron-expression>0 0 13,16,19 * * ?</cron-expression>
        </cron>
    </trigger>
</job>
Below are the Quartz properties in the DB:
# Configure Main Scheduler Properties
#============================================================================
org.quartz.scheduler.instanceName = JB_QUARTZ
org.quartz.scheduler.instanceId = AUTO
#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.threadCount = 10
org.quartz.threadPool.threadPriority = 5
#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.useProperties = true
org.quartz.jobStore.dataSource = jobSchedulerDS
org.quartz.jobStore.tablePrefix = JB_QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.clusterCheckinInterval = 10000
#============================================================================
# Configure Datasources
#============================================================================
org.quartz.dataSource.jobSchedulerDS.jndiURL=java:JBAPI
org.quartz.dataSource.jobSchedulerDS.java.naming.factory.initial=org.jnp.interfaces.NamingContextFactory
#java.naming.provider.url=jnp://localhost:3099
java.naming.provider.url=jnp://166.20.337.12:8441,166.20.337.14:8441,166.20.337.16:8441,166.20.337.19:8441
#============================================================================
Below is the Java code for the job:
public void execute(JobExecutionContext ctx) throws JobExecutionException
{
    try {
        //Scheduler scheduler = new StdSchedulerFactory().getScheduler();
        SendEmail sm = new SendEmail();
        boolean running = false;
        if (ctx.getJobDetail().getKey().getName().equalsIgnoreCase("match Job")) {
            LogFile.MATCH_JOB.logInfo("Match jobs size : " + ctx.getScheduler().getCurrentlyExecutingJobs().size(), this.getClass().getName());
            if (ctx.getScheduler().getCurrentlyExecutingJobs().size() == 1) {
                initClient(); // This method will read properties from DB
                startTime = System.currentTimeMillis();
                Match(); // This method will execute job level code
                endTime = System.currentTimeMillis();
                LogFile.MATCH_JOB.logInfo("***Match job ends*** Loadtest: " + Constants.loadTest + " in time: " + (endTime - startTime) / 1000 + "secs", this.getClass().getName());
            } else {
                running = true;
            }
        }
        if (running) {
            LogFile.MATCH_JOB.logInfo("The Match job is already running – sending email", this.getClass().getName());
            sm.sendEmail();
        }
    } catch (Exception e) {
        e.printStackTrace();
        LogFile.MATCH_JOB.logError("***Match Job Error*** " + e.getStackTrace(), this.getClass().getName());
    }
}

Quartz schedulers controlled from an external app

I am currently working on Quartz.NET (version 2.3.1). I have created different Schedulers with different jobs using the code below (for each scheduler):
NameValueCollection properties = new NameValueCollection();
properties["quartz.scheduler.instanceName"] = "QuartzSchedulerTest";
properties["quartz.scheduler.instanceId"] = AUTO;
properties["quartz.threadPool.type"] = "Quartz.Simpl.SimpleThreadPool, Quartz";
properties["quartz.threadPool.threadPriority"] = "Normal";
properties["quartz.jobStore.misfireThreshold"] = "60000";
properties["quartz.jobStore.clustered"] = "true";
properties["quartz.jobStore.tablePrefix"] = "QRTZ_";
properties["quartz.jobStore.type"] = "Quartz.Impl.AdoJobStore.JobStoreTX, Quartz";
properties["quartz.jobStore.dataSource"] = "default";
properties["quartz.jobStore.useProperties"] = "false";
properties["quartz.jobStore.driverDelegateType"] = "Quartz.Impl.AdoJobStore.SqlServerDelegate, Quartz";
properties["quartz.dataSource.default.connectionString"] = "myConnString"
properties["quartz.dataSource.default.provider"] = "SqlServer-20";
// Get scheduler
ISchedulerFactory sf = new StdSchedulerFactory(properties);
IScheduler scheduler = sf.GetScheduler();
Now I have all the scheduling information stored in a SQL database and everything works.
I created a new console application because I need to manage all the schedulers (get the scheduler list, the jobs for each scheduler, send commands to pause and resume triggers, etc.).
This is the code I wrote to try to get handles to all the existing schedulers:
NameValueCollection properties = new NameValueCollection();
properties["quartz.threadPool.type"] = "Quartz.Simpl.SimpleThreadPool, Quartz";
properties["quartz.threadPool.threadPriority"] = "Normal";
properties["quartz.jobStore.misfireThreshold"] = "60000";
properties["quartz.jobStore.clustered"] = "true";
properties["quartz.jobStore.tablePrefix"] = "QRTZ_";
properties["quartz.jobStore.type"] = "Quartz.Impl.AdoJobStore.JobStoreTX, Quartz";
properties["quartz.jobStore.dataSource"] = "default";
properties["quartz.jobStore.useProperties"] = "false";
properties["quartz.jobStore.driverDelegateType"] = "Quartz.Impl.AdoJobStore.SqlServerDelegate, Quartz";
properties["quartz.dataSource.default.connectionString"] = "myConnString"
properties["quartz.dataSource.default.provider"] = "SqlServer-20";
// Get scheduler
ISchedulerFactory sf = new StdSchedulerFactory(properties);
var schedulers = sf.AllSchedulers;
But no schedulers are returned (the scheduler count is 0). Can anyone tell me how I can get all the schedulers? Is it possible?
Sorry for my English and thanks in advance.
You have to connect to each scheduler instance directly using remoting. The schedulers are not aware of each other and there is no way to get a list of all of the schedulers that are in a cluster.
Once you connect to each scheduler then you'll be able to pull a list of running jobs and manipulate the job schedule as necessary. If all of the schedulers are in a cluster, then you don't have to connect to all of them to manipulate the jobs themselves. You can do that from any of the instances. However, the list of running jobs has to be compiled by asking each scheduler individually.
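For example, a minimal sketch of connecting to one scheduler instance over remoting (this assumes each scheduler process exports itself with quartz.scheduler.exporter.type = "Quartz.Simpl.RemotingSchedulerExporter, Quartz"; the address, port, and instance name below are placeholders):
NameValueCollection properties = new NameValueCollection();
// Must match the instanceName of the scheduler you want to control.
properties["quartz.scheduler.instanceName"] = "QuartzSchedulerTest";
// Ask the factory for a remoting proxy instead of creating a local scheduler.
properties["quartz.scheduler.proxy"] = "true";
properties["quartz.scheduler.proxy.address"] = "tcp://remote-host:555/QuartzScheduler";

ISchedulerFactory sf = new StdSchedulerFactory(properties);
IScheduler remoteScheduler = sf.GetScheduler();

// Inspect and manipulate that instance as needed, e.g.:
var executingJobs = remoteScheduler.GetCurrentlyExecutingJobs();
remoteScheduler.PauseAll();
Repeat this for each scheduler instance (address) you want to manage, and merge the lists of running jobs yourself.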

Testing RxJS bufferWithTime on node.js

I'm TDDing an RxJS solution and using bufferWithTime. Why does res in the example code get the value [] (empty array)? Is it a problem in my code or in the RxJS library? I am running it on Node.js v0.10.30 with rx version 2.2.27.
The following can be run in the Node.js CoffeeScript REPL:
Rx = require 'rx'
onNext = Rx.ReactiveTest.onNext
TEST_EVENT_A = { messageName: 'test_event_a', namespace: 'test' }
events = [onNext(50, TEST_EVENT_A), onNext(100, TEST_EVENT_A)]
scheduler = new Rx.TestScheduler
stream = scheduler.createHotObservable events
excludeEmpty = (event) -> console.log "Filtering...", event; event.length > 0
countValues = (event) -> console.log "Counting...", event; event.length
res = scheduler.startWithTiming((=> stream.bufferWithTime(10).filter(excludeEmpty).map(countValues)), 0, 0, 1000).messages
# => []
After debugging and reading the docs, I noticed the following sentence in the bufferWithTime documentation: [scheduler=Rx.Scheduler.timeout] (Scheduler): Scheduler to run buffer timers on. If not specified, the timeout scheduler is used.
I had thought that when creating an observable from a TestScheduler, it would use that scheduler for all its methods, but the scheduler has to be passed in explicitly. So below is the correct solution:
print = (event) -> console.log "Event: ", event
Rx = require 'rx'
onNext = Rx.ReactiveTest.onNext
TEST_EVENT_A = { messageName: 'test_event_a', namespace: 'test' }
events = [onNext(50, TEST_EVENT_A), onNext(100, TEST_EVENT_A)]
scheduler = new Rx.TestScheduler
stream = scheduler.createHotObservable events
excludeEmpty = (event) -> console.log "Filtering...", event; event.length > 0
countValues = (event) -> console.log "Counting...", event; event.length
# Notice the 2nd argument to bufferWithTime
res = scheduler.startWithTiming((=> stream.bufferWithTime(100, scheduler).filter(excludeEmpty).map(countValues)), 0, 0, 1500).messages
# => res[0].value.value == 2