Adding file to GitHub using java client - org.eclipse.egit.github.core - github

I am trying to add a file to a repository using the below code but I am getting below error. I just want to add a file for now.
org.eclipse.egit.github.core.client.RequestException: Invalid request.
For 'properties/email', nil is not a string. For 'properties/name',
nil is not a string. For 'properties/email', nil is not a string. For
'properties/name', nil is not a string. (422) at
org.eclipse.egit.github.core.client.GitHubClient.createException(GitHubClient.java:552)
at
org.eclipse.egit.github.core.client.GitHubClient.sendJson(GitHubClient.java:643)
at
org.eclipse.egit.github.core.client.GitHubClient.post(GitHubClient.java:757)
at
org.eclipse.egit.github.core.service.DataService.createCommit(DataService.java:397)
I sense that it is expecting some properties, but how to supply them is not clear. What is it that I am missing?
Referring to https://gist.github.com/Detelca/2337731
import java.io.IOException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import org.eclipse.egit.github.core.Blob;
import org.eclipse.egit.github.core.Commit;
import org.eclipse.egit.github.core.CommitUser;
import org.eclipse.egit.github.core.Reference;
import org.eclipse.egit.github.core.Repository;
import org.eclipse.egit.github.core.RepositoryCommit;
import org.eclipse.egit.github.core.Tree;
import org.eclipse.egit.github.core.TreeEntry;
import org.eclipse.egit.github.core.TypedResource;
import org.eclipse.egit.github.core.User;
import org.eclipse.egit.github.core.client.GitHubClient;
import org.eclipse.egit.github.core.service.CommitService;
import org.eclipse.egit.github.core.service.DataService;
import org.eclipse.egit.github.core.service.RepositoryService;
import org.eclipse.egit.github.core.service.UserService;
public class GHWriter {

    public static void main(String[] args) {
        try {
            new GHWriter().writeFile("test_two.txt", "test content");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    // Based on https://gist.github.com/Detelca/2337731
    /**
     * Creates a single commit on the first branch of the configured repository
     * that adds {@code fileName} containing {@code fileContent}, then moves
     * heads/master to the new commit.
     *
     * @param fileName    repository path of the file to create
     * @param fileContent content stored in the new blob
     * @return true when the master reference was updated successfully
     * @throws IOException on any GitHub API failure
     */
    public boolean writeFile(String fileName, String fileContent) throws IOException {
        // initialize github client with credentials
        GitHubClient client = new GitHubClient();
        client.setCredentials("username", "password");

        // create needed services -- every service must share the authenticated
        // client (the original built RepositoryService without it, which only
        // works for public, read-only data)
        RepositoryService repositoryService = new RepositoryService(client);
        CommitService commitService = new CommitService(client);
        DataService dataService = new DataService(client);

        // get the sha of the latest commit on the first branch
        Repository repository = repositoryService.getRepository("username", "repositoryName");
        String baseCommitSha = repositoryService.getBranches(repository).get(0).getCommit().getSha();
        RepositoryCommit baseCommit = commitService.getCommit(repository, baseCommitSha);
        // fetch the base tree via the commit's TREE sha (the original passed the
        // commit sha itself, which is not the tree identifier)
        String treeSha = baseCommit.getCommit().getTree().getSha();

        // create a new blob holding the requested file content (the original
        // ignored the fileContent parameter and wrote a timestamp instead)
        Blob blob = new Blob();
        blob.setContent(fileContent).setEncoding(Blob.ENCODING_UTF8);
        String blobSha = dataService.createBlob(repository, blob);
        Tree baseTree = dataService.getTree(repository, treeSha);

        // create a tree entry pointing at the blob under the requested path
        // (the original ignored the fileName parameter and hard-coded a path)
        TreeEntry treeEntry = new TreeEntry();
        treeEntry.setPath(fileName);
        treeEntry.setMode(TreeEntry.MODE_BLOB);
        treeEntry.setType(TreeEntry.TYPE_BLOB);
        treeEntry.setSha(blobSha);
        treeEntry.setSize(blob.getContent().length());
        Collection<TreeEntry> entries = new ArrayList<TreeEntry>();
        entries.add(treeEntry);
        Tree newTree = dataService.createTree(repository, entries, baseTree.getSha());

        // build the commit object
        Commit commit = new Commit();
        commit.setMessage("first commit at " + new Date());
        commit.setTree(newTree);

        // FIX for the 422 "For 'properties/email', nil is not a string" error:
        // the commit API requires a non-null author/committer name AND email.
        // getUser() may return null for either field when the profile leaves
        // them unset, so fall back to values that are always present.
        UserService userService = new UserService(client);
        User user = userService.getUser();
        CommitUser author = new CommitUser();
        String authorName = user.getName() != null ? user.getName() : user.getLogin();
        String authorEmail = user.getEmail() != null
                ? user.getEmail()
                : user.getLogin() + "@users.noreply.github.com";
        author.setName(authorName);
        author.setEmail(authorEmail);
        author.setDate(Calendar.getInstance().getTime());
        commit.setAuthor(author);
        commit.setCommitter(author);

        // the parent of the new commit is the branch head we started from
        List<Commit> parents = new ArrayList<Commit>();
        parents.add(new Commit().setSha(baseCommitSha));
        commit.setParents(parents);
        Commit newCommit = dataService.createCommit(repository, commit);

        // point heads/master at the freshly created commit
        TypedResource commitResource = new TypedResource();
        commitResource.setSha(newCommit.getSha());
        commitResource.setType(TypedResource.TYPE_COMMIT);
        commitResource.setUrl(newCommit.getUrl());
        Reference reference = dataService.getReference(repository, "heads/master");
        reference.setObject(commitResource);
        dataService.editReference(repository, reference, true);
        System.out.println("Committed URL: " + newCommit.getUrl());
        return true;
    }
}
Thanks

After some debugging, found that email and name values are coming as null which is the source of the issue.
Adding below two lines will solve the issue:
author.setName( userName );
author.setEmail(email);

Related

How to shorten the URL for a Restful service

I have a maven java web application developed using Netbeans. I have figured out to run a parameter based Restful service successfully.
The URL contains three names in separated by slashes before providing the parameter.
http://localhost:8080/chims/api/data/list?name=district_list
Can I have a URL with less slashes like
http://localhost:8080/chims/data?name=district_list
This is the application config file.
package org.netbeans.rest.application.config;
import javax.ws.rs.core.Application;
#javax.ws.rs.ApplicationPath("api")
public class ApplicationConfig extends Application {
}
This is the service file.
package lk.gov.health.phsp.ws;
import java.util.List;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.Produces;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import lk.gov.health.phsp.bean.AreaApplicationController;
import lk.gov.health.phsp.entity.Area;
import lk.gov.health.phsp.enums.AreaType;
import org.json.JSONArray;
import org.json.JSONObject;
#Path("data")
#RequestScoped
public class ApiResource {
#Context
private UriInfo context;
#Inject
AreaApplicationController areaApplicationController;
/**
* Creates a new instance of GenericResource
*/
public ApiResource() {
}
#GET
#Path("list")
#Produces(MediaType.APPLICATION_JSON)
public String getJson(#QueryParam("name") String name) {
JSONObject jSONObjectOut;
if (name == null || name.trim().equals("")) {
jSONObjectOut = errorMessageInstruction();
} else {
switch (name) {
case "district_list":
jSONObjectOut = districtList();
break;
default:
jSONObjectOut = errorMessage();
}
}
String json = jSONObjectOut.toString();
return json;
}
private JSONObject districtList() {
JSONObject jSONObjectOut = new JSONObject();
JSONArray array = new JSONArray();
List<Area> ds = areaApplicationController.getAllAreas(AreaType.District);
for (Area a : ds) {
JSONObject ja = new JSONObject();
ja.put("district_id", a.getCode());
ja.put("district_name", a.getName());
array.put(ja);
}
jSONObjectOut.put("data", array);
jSONObjectOut.put("status", successMessage());
return jSONObjectOut;
}
private JSONObject successMessage() {
JSONObject jSONObjectOut = new JSONObject();
jSONObjectOut.put("code", 200);
jSONObjectOut.put("type", "success");
return jSONObjectOut;
}
private JSONObject errorMessage() {
JSONObject jSONObjectOut = new JSONObject();
jSONObjectOut.put("code", 400);
jSONObjectOut.put("type", "error");
jSONObjectOut.put("message", "Parameter name is not recognized.");
return jSONObjectOut;
}
private JSONObject errorMessageInstruction() {
JSONObject jSONObjectOut = new JSONObject();
jSONObjectOut.put("code", 400);
jSONObjectOut.put("type", "error");
jSONObjectOut.put("message", "You must provide a value for the parameter name.");
return jSONObjectOut;
}
}
I have not done any changes to the web.xml file. All the tutorials I went through did not give me a clear picture as to how and why I have to change it. Even without changing it, the web service works as expected.
How can I reduce the slashes in the URL?
The first thing you can do is remove the #Path("list"). A GET to /data will automatically go to the getJson method.
The next thing you can do is remove the api. You can do this by changing the "api" to "", "/", or "/*". All three will result in the same "/*". What happens when you do this is that Jersey will now take all requests that come to the server (and the same context). Any other servlets or static content will not be reachable.
To get around this, you can configure Jersey to run as a servlet filter instead of a servlet. Then configure it to forward unknown requests. See this post for how to configure this.

Java-Spark-Mongo: filter(dataset.col(newTime).$greater(oldTime)) not running on full data set

I have written a Java-Spark code with Mongo connector. It is supposed to fetch all those rows from MongoDB where column createdDate is greater than previous run's createdDate (like a max of high-water-mark value for each run which I am storing in Oracle. Initially the high-water-mark value in Oracle is 1900-01-01 00:00:00.000).
This column createdDate is ISODate type in mongoDB.
In my MongoDB data, the max value stored for this column createdDate is 2018-04-11 01:43:20.165.
But the filter in the code is not working as desired, i.e. in first run it is fetching sometimes till 2018-03-30 21:48:59.519, then on second or third run its fetching until the max value (2018-04-11 01:43:20.165).
Ideally it should happen in the first run itself when the initial high-water-mark value is 1900-01.....
Here is the code:
package mongo;
import java.net.URI;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.sql.Date;
import java.util.Iterator;
import java.util.List;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.bson.Document;
import static org.apache.spark.sql.functions.*;
import org.apache.spark.sql.DataFrameWriter;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import com.mongodb.spark.MongoSpark;
import com.mongodb.spark.rdd.api.java.JavaMongoRDD;
import java.sql.Timestamp;
/**
 * Incremental extract from MongoDB via Spark: fetches rows whose high-water
 * column is greater than the previously stored mark (kept in Oracle), writes
 * them as JSON text to the cluster address, and advances the mark.
 */
public final class MongoRead
{
    private static Connection con = null;

    /**
     * Reads the stored high-water-mark value (column 3) for {@code table}
     * from Oracle. Returns "" when no row matches or the query fails.
     */
    private static String readHighWaterMark(String table, String oraConn, String oraUser, String oraPswd) throws Exception
    {
        String highWaterMarkValue = "";
        try
        {
            con = DriverManager.getConnection(oraConn, oraUser, oraPswd);
            // Parameterized query: the original concatenated the table name
            // into the SQL string (SQL-injection prone, no statement cleanup).
            try (PreparedStatement stmt = con.prepareStatement(
                    "select * from difa.HIGH_WATER_MARK_TABLE where table_nm=?"))
            {
                stmt.setString(1, table);
                try (ResultSet rs = stmt.executeQuery())
                {
                    while (rs.next()) {
                        highWaterMarkValue = rs.getString(3);
                    }
                }
            }
        }
        catch (Exception e) {
            e.printStackTrace();
            if (con != null) {
                con.close();
            }
        }
        return highWaterMarkValue;
    }

    /** Updates the stored high-water-mark value for {@code key} in Oracle. */
    private static void setHighWaterMark(String key, String value) throws Exception
    {
        try (PreparedStatement pStmt = con.prepareStatement(
                "UPDATE high_water_mark_table SET high_water_mark_VALUE=? where table_nm=?"))
        {
            pStmt.setString(1, value);
            pStmt.setString(2, key);
            int updated = pStmt.executeUpdate();
            System.out.println(updated + " records updated");
        }
    }

    public static void main(final String[] args) throws Exception {
        if (args.length < 8) {
            System.out.println("Please provide correct inputs");
            System.exit(1);
        }
        String mongoAddress = args[0];
        String clusterAddress = args[1];
        String oraConn = args[2];
        String oraUser = args[3];
        String oraPswd = args[4];
        String tableNm = args[5];
        String highWaterCol = args[6];
        String loadType = args[7];

        SparkSession spark = SparkSession.builder()
                .master("local")
                .appName("MongoSparkRecordReader")
                .config("spark.mongodb.input.uri", mongoAddress)
                .config("spark.mongodb.output.uri", mongoAddress)
                .getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

        // remove any previous output at the target path (best effort)
        try {
            FileSystem fs = FileSystem.get(new URI(clusterAddress), jsc.hadoopConfiguration());
            fs.delete(new Path(clusterAddress), true);
        }
        catch (Exception e) {
            e.printStackTrace();
        }

        /* ********Read data from MongoDB******* */
        // FIX: persist() the source dataset. Without it the lazy Mongo read is
        // re-evaluated per action and the filter did not see the full data set.
        Dataset<Row> dataset = MongoSpark.load(jsc).toDF().persist();

        if (loadType.equalsIgnoreCase("I")) {
            String highWaterMark = readHighWaterMark(tableNm, oraConn, oraUser, oraPswd);
            System.out.println("============HIGH_WATER_MARK_VALUE: " + highWaterMark);
            // stored format is ISO-like ("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
            // normalize to java.sql.Timestamp's "yyyy-mm-dd hh:mm:ss.fff"
            Timestamp oldTime = Timestamp.valueOf(highWaterMark.replace("T", " ").replace("Z", ""));

            // Fetch records where createdDate is greater than the previous mark.
            Dataset<Row> filtered = dataset.filter(dataset.col(highWaterCol).$greater(oldTime)).persist();
            filtered.toJSON().write().text(clusterAddress);

            // Compute MAX(createdDate) over the fetched rows.
            Dataset<Row> maxHighWaterRow = filtered.agg(max(filtered.col(highWaterCol)).alias("newHighWater")).persist();
            List<Timestamp> newHighWaterValue = maxHighWaterRow.select("newHighWater").as(Encoders.TIMESTAMP()).collectAsList();
            Timestamp maxHighWaterMarkValue = newHighWaterValue.iterator().next();

            // Advance the mark only when a strictly greater value was seen.
            // (The original re-parsed highWaterMark a second time into oldDate;
            // oldTime already holds that value.)
            if (maxHighWaterMarkValue != null && maxHighWaterMarkValue.after(oldTime)) {
                SimpleDateFormat dtFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
                setHighWaterMark(tableNm, dtFormat.format(maxHighWaterMarkValue).replace(" ", "T").concat("Z"));
            }
        }
        else {
            // full load: dump everything
            dataset.toJSON().write().text(clusterAddress);
        }

        if (con != null) {
            con.close();
        }
        jsc.close();
    }
}
Any idea why the filter and $greater is not fetching the records correctly ?
I fixed this by adding .persist() for the Dataset:
/* ********Read data from MongoDB******* */
Dataset<Row> dataset = MongoSpark.load(jsc).toDF().persist();
....
..
...
Dataset<Row> filtered = dataset.filter(dataset.col(highWaterCol).$greater(old)).persist();
I don't know why without persist() the filter was not running on the whole dataset.

how to create workflow model programmatically in aem

I would like to create an AEM workflow model programmatically due to a business requirement. I used the code below to implement it, but it throws an exception; this problem has tortured me for a week. Could you please give some hints? Thanks in advance.
The code is below!
package com.sample.mms.workflow;
import java.util.Iterator;
import java.util.List;
import javax.jcr.RepositoryException;
import org.apache.commons.lang.StringUtils;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Properties;
import org.apache.felix.scr.annotations.Property;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.Service;
import org.apache.jackrabbit.api.security.user.User;
import org.apache.jackrabbit.api.security.user.UserManager;
import org.apache.sling.api.resource.LoginException;
import org.apache.sling.api.resource.Resource;
import org.apache.sling.api.resource.ResourceResolver;
import org.apache.sling.api.resource.ResourceResolverFactory;
import org.osgi.framework.Constants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.sample.mms.service.ConfigurationService;
import com.sample.mms.service.TopicOwnerBizService;
import com.sample.mms.util.WorkflowUtil;
import com.day.cq.workflow.WorkflowException;
import com.day.cq.workflow.WorkflowService;
import com.day.cq.workflow.WorkflowSession;
import com.day.cq.workflow.exec.WorkItem;
import com.day.cq.workflow.exec.WorkflowData;
import com.day.cq.workflow.exec.WorkflowProcess;
import com.day.cq.workflow.metadata.MetaDataMap;
import com.day.cq.workflow.model.WorkflowModel;
import com.day.cq.workflow.model.WorkflowNode;
import com.day.cq.workflow.model.WorkflowTransition;
#Component
#Service
#Properties({
#Property(name = Constants.SERVICE_DESCRIPTION, value = "general topic owner mark and approval each topic page step"),
#Property(name = Constants.SERVICE_VENDOR, value = "Someone"),
#Property(name = "process.label", value = "SAMPLE MMS NL - General Topic Owner Approval Process Step") })
public class TopicOwnerHandleProcessStep implements WorkflowProcess {
protected final Logger logger = LoggerFactory.getLogger(this.getClass());
#Reference
ResourceResolverFactory resourceResolverFactory;
#Reference
private ConfigurationService configurationService;
#Reference
private TopicOwnerBizService topicOwnerBizService;
#Reference
private WorkflowService workflowService;
#Override
public void execute(WorkItem workItem, WorkflowSession workflowSession, MetaDataMap metaDataMap)
throws WorkflowException {
final WorkflowData workflowData = workItem.getWorkflowData();
final String payLoadType = workflowData.getPayloadType();
if(!StringUtils.equals(payLoadType, "JCR_PATH")){
return;
}
final String payLoad = workflowData.getPayload().toString();
String topicOwnerGroup = configurationService.getTopic_owner_participant_group();
ResourceResolver resourceResolver = null;
UserManager userManager = null;
try {
resourceResolver = WorkflowUtil.getResourceResolver(resourceResolverFactory);
userManager = resourceResolver.adaptTo(UserManager.class);
List<User> list = WorkflowUtil.getUsersByGroup(userManager, topicOwnerGroup);
User user = null;
//create a model for each topic owner approval
WorkflowModel wm = workflowSession.createNewModel("sample mms topic owner review each topic page_" + System.currentTimeMillis());
WorkflowData wd = workflowSession.newWorkflowData("JCR_PATH", payLoad);
//get start node
WorkflowNode startNode = wm.getRootNode();
//wm.createNode("start",WorkflowNode.TYPE_START,"");
//get end node
WorkflowNode endNode = wm.getEndNode();
//wm.createNode("end",WorkflowNode.TYPE_END,"");
//create and split node
WorkflowNode andSplitNode = wm.createNode("and split",WorkflowNode.TYPE_AND_SPLIT,null);
//create and join node
WorkflowNode andJoinNode = wm.createNode("and join",WorkflowNode.TYPE_AND_JOIN,"");
wm.validate();
//create transition between start node and split node
wm.createTransition();
//wm.createTransition(startNode,andSplitNode,null);
//create transition between split node and end node
wm.createTransition(andJoinNode,endNode,null);
for(int i=0;i<list.size();i++){
user = list.get(i);
Iterator<Resource> resources = topicOwnerBizService.getResourceByTopicOwner(resourceResolver, payLoad, user.getID());
if(resources.hasNext()){
// ResourceResolver resourceResolver1 = WorkflowUtil.getResourceResolver(resourceResolverFactory);
// Session session = resourceResolver1.adaptTo(Session.class);
// WorkflowSession workflowSession1 = workflowService.getWorkflowSession(session);
// WorkflowModel wm = workflowSession1.getModel(configurationService.getTopicOwnerHandleSubsequentWorkflow());
// WorkflowData wd = workflowSession1.newWorkflowData("JCR_PATH", payLoad);
// wd.getMetaDataMap().put("topicOwnerParticipant", user.getID());
// workflowSession1.startWorkflow(wm, wd);
//create branch node for and split node
WorkflowNode topicOwnerParticipantNode = wm.createNode("topic owner participant",WorkflowNode.TYPE_PARTICIPANT,"");
//{timeoutMillis=0, timeoutHandler=com.sample.mms.workflow.TopicOwnerTimeoutHandler, PARTICIPANT=hunter.liang}
topicOwnerParticipantNode.getMetaDataMap().put("timeoutMillis", 0L);
topicOwnerParticipantNode.getMetaDataMap().put("timeoutHandler", "com.sample.mms.workflow.TopicOwnerTimeoutHandler");
topicOwnerParticipantNode.getMetaDataMap().put("PARTICIPANT", user.getID());
WorkflowNode orSplitNode = wm.createNode("or split",WorkflowNode.TYPE_OR_SPLIT,"");
WorkflowNode orJoinNode = wm.createNode("or join",WorkflowNode.TYPE_OR_JOIN,"");
//{PROCESS_AUTO_ADVANCE=true, PROCESS=com.sample.mms.workflow.TopicOwnerApprovalProcessStep, PROCESS_ARGS=approval}
WorkflowNode topicOwnerApprovalNode = wm.createNode("topic owner approval",WorkflowNode.TYPE_PROCESS,"");
topicOwnerApprovalNode.getMetaDataMap().put("PROCESS_AUTO_ADVANCE", true);
topicOwnerApprovalNode.getMetaDataMap().put("PROCESS", "com.sample.mms.workflow.TopicOwnerApprovalProcessStep");
topicOwnerApprovalNode.getMetaDataMap().put("PROCESS_ARGS", "approval");
//{PROCESS_AUTO_ADVANCE=true, PROCESS=com.sample.mms.workflow.TopicOwnerApprovalProcessStep, PROCESS_ARGS=reject}
WorkflowNode topicOwnerRejectNode = wm.createNode("topic owner reject",WorkflowNode.TYPE_PROCESS,"");
topicOwnerRejectNode.getMetaDataMap().put("PROCESS_AUTO_ADVANCE", true);
topicOwnerRejectNode.getMetaDataMap().put("PROCESS", "com.sample.mms.workflow.TopicOwnerApprovalProcessStep");
topicOwnerRejectNode.getMetaDataMap().put("PROCESS_ARGS", "reject");
WorkflowNode timeoutNode = wm.createNode("time out join",WorkflowNode.TYPE_PROCESS,"");
//wm.createTransition(andSplitNode,orSplitNode,"");
wm.createTransition(orSplitNode,topicOwnerApprovalNode,null);
wm.createTransition(orSplitNode,topicOwnerRejectNode,null);
WorkflowTransition orSplitAndTimeOutTransition = wm.createTransition(orSplitNode,timeoutNode,null);
orSplitAndTimeOutTransition.setRule("function check(){return false;}");
wm.createTransition(topicOwnerApprovalNode,orJoinNode,null);
wm.createTransition(topicOwnerRejectNode,orJoinNode,null);
wm.createTransition(timeoutNode,orJoinNode,null);
wm.createTransition(andSplitNode,orSplitNode,null);
wm.createTransition(orJoinNode,andJoinNode,null);
}
}
workflowSession.startWorkflow(wm, wd);
} catch (LoginException e) {
e.printStackTrace();
} catch (RepositoryException e) {
e.printStackTrace();
}
}
}
The error log is below!
20.04.2016 17:35:24.054 *INFO* [JobHandler: /etc/workflow/instances/2016-04-20/model_27918689599044:/content/samplemms/2016/02/index] com.adobe.granite.workflow.core.WorkflowSessionImpl Workflow model deployed: /etc/workflow/models/sample_mms_topic_owner_175(Version: 1.0)
20.04.2016 17:35:36.015 *ERROR* [JobHandler: /etc/workflow/instances/2016-04-20/model_27918689599044:/content/samplemms/2016/02/index] com.day.cq.workflow.compatibility.CQWorkflowProcessRunner Process execution resulted in an error: null
java.lang.NullPointerException: null
at com.adobe.granite.workflow.core.model.WorkflowModelImpl.createTransition(WorkflowModelImpl.java:155)
at com.adobe.granite.workflow.core.model.WorkflowModelImpl.createTransition(WorkflowModelImpl.java:149)
at com.day.cq.workflow.impl.model.CQWorkflowModelWrapper.createTransition(CQWorkflowModelWrapper.java:145)
at com.sample.mms.workflow.TopicOwnerHandleProcessStep.execute(TopicOwnerHandleProcessStep.java:105)
at com.day.cq.workflow.compatibility.CQWorkflowProcessRunner.execute(CQWorkflowProcessRunner.java:93)
at com.adobe.granite.workflow.core.job.HandlerBase.executeProcess(HandlerBase.java:215)
at com.adobe.granite.workflow.core.job.JobHandler.process(JobHandler.java:140)
at org.apache.sling.event.jobs.JobUtil$1.run(JobUtil.java:365)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
20.04.2016 17:35:36.015 *ERROR* [JobHandler: /etc/workflow/instances/2016-04-20/model_27918689599044:/content/samplemms/2016/02/index] com.adobe.granite.workflow.core.job.JobHandler Process execution resulted in an error
com.adobe.granite.workflow.WorkflowException: Process execution resulted in an error
at com.adobe.granite.workflow.core.job.HandlerBase.executeProcess(HandlerBase.java:225)
at com.adobe.granite.workflow.core.job.JobHandler.process(JobHandler.java:140)
at org.apache.sling.event.jobs.JobUtil$1.run(JobUtil.java:365)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Caused by: com.adobe.granite.workflow.WorkflowException: Failed to execute process
at com.day.cq.workflow.compatibility.CQWorkflowProcessRunner.execute(CQWorkflowProcessRunner.java:108)
at com.adobe.granite.workflow.core.job.HandlerBase.executeProcess(HandlerBase.java:215)
... 5 common frames omitted
Caused by: java.lang.NullPointerException: null
at com.adobe.granite.workflow.core.model.WorkflowModelImpl.createTransition(WorkflowModelImpl.java:155)
at com.adobe.granite.workflow.core.model.WorkflowModelImpl.createTransition(WorkflowModelImpl.java:149)
at com.day.cq.workflow.impl.model.CQWorkflowModelWrapper.createTransition(CQWorkflowModelWrapper.java:145)
at com.sample.mms.workflow.TopicOwnerHandleProcessStep.execute(TopicOwnerHandleProcessStep.java:105)
at com.day.cq.workflow.compatibility.CQWorkflowProcessRunner.execute(CQWorkflowProcessRunner.java:93)
... 6 common frames omitted
Instead of the com.day.cq.workflow APIs, can you try using the com.adobe.granite.workflow APIs?

JMapframe displays only a single shapefile

I used NetBeans and GeoTools to program a graphical interface to display multiple shapefiles in the same JMapFrame. I used the following code, but when it executes it displays only one shapefile and I do not know why. Can someone help me? I await your answers.
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import java.io.File;
import org.geotools.data.FeatureSource;
import org.geotools.data.FileDataStore;
import org.geotools.data.FileDataStoreFinder;
import org.geotools.data.simple.SimpleFeatureCollection;
import org.geotools.data.simple.SimpleFeatureIterator;
import org.geotools.map.DefaultMapContext;
import org.geotools.map.MapContext;
import org.geotools.swing.JMapFrame;
import org.geotools.swing.data.JFileDataStoreChooser;
import org.opengis.feature.simple.SimpleFeature;
/**
*
* #author Brahim
*/
// NOTE(review): this snippet is truncated in the question -- the try block has
// no matching catch, and neither the method nor the class is closed.
class ImportVecteur2
{
// Frame the shapefile will be rendered into.
private JMapFrame fenMap;
private MapContext mapContext;
ImportVecteur2(JMapFrame fenMap)
{
//this.mapContext = mapContext;
this.fenMap = fenMap;
}
#SuppressWarnings("static-access")
// Prompts the user for a .shp file and displays it. A brand-new MapContext is
// assigned on every call, which is why only the most recently chosen shapefile
// ever appears in the frame (the root cause identified in the answer below).
public void chercheAfficheVecteur() //throws Exception
{
try
{
File file = JFileDataStoreChooser.showOpenFile("shp", null);
if (file == null)
{
// user cancelled the chooser
return;
}
FileDataStore store = FileDataStoreFinder.getDataStore(file);
FeatureSource featureSource = store.getFeatureSource();
//get vertices of file
// Create a map context and add our shapefile to it
mapContext = new DefaultMapContext();
mapContext.addLayer(featureSource, null);
// Now display the map
fenMap.enableLayerTable(true);
fenMap.setMapContext(mapContext);
fenMap.setVisible(true);
}
Each time you call chercheAfficheVecteur you create a new MapContext so the previous one is thrown away and with it your previous shapefile. If you change the method to be
// Answer's revision (also truncated -- try has no catch and the method is not
// closed): the MapContext is created only once and reused, so every chosen
// shapefile is ADDED as a layer instead of replacing the previous one.
public void chercheAfficheVecteur() {
try {
File file = JFileDataStoreChooser.showOpenFile("shp", null);
if (file == null) {
// user cancelled the chooser
return;
}
FileDataStore store = FileDataStoreFinder.getDataStore(file);
FeatureSource featureSource = store.getFeatureSource();
//get vertices of file
// Create a map context and add our shapefile to it
if(mapContext == null){
// lazily create the shared context on first use
mapContext = new DefaultMapContext();
fenMap.setMapContext(mapContext);
}
//make it look prettier
Style style = SLD.createSimpleStyle(featureSource.getSchema());
mapContext.addLayer(featureSource, style);
}
and
// Answer's companion constructor: the one-time frame configuration
// (layer table + visibility) moves here, out of the per-file method.
ImportVecteur2(JMapFrame fenMap)
{
//this.mapContext = mapContext;
this.fenMap = fenMap;
fenMap.enableLayerTable(true);
fenMap.setVisible(true);
}
It should work better.
After further testing (i.e. I actually compiled some code) - MapContext is deprecated (and has been for some time) please use MapContent.
package org.geotools.tutorial.quickstart;
import java.awt.Color;
import java.awt.Dimension;
import java.io.File;
import java.io.IOException;
import org.geotools.data.FeatureSource;
import org.geotools.data.FileDataStore;
import org.geotools.data.FileDataStoreFinder;
import org.geotools.map.FeatureLayer;
import org.geotools.map.Layer;
import org.geotools.map.MapContent;
import org.geotools.styling.SLD;
import org.geotools.styling.Style;
import org.geotools.swing.JMapFrame;
import org.geotools.swing.data.JFileDataStoreChooser;
/**
 * Demo: repeatedly prompts for shapefiles and loads each one as a separate,
 * individually coloured layer into a single shared MapContent / JMapFrame.
 * (MapContext is deprecated; this uses its replacement, MapContent.)
 */
public class Test {

    // small palette cycled per loaded layer
    private static final Color[] color = { Color.red, Color.blue, Color.green,
            Color.MAGENTA };

    // single shared map content -- reusing it (instead of recreating it per
    // file) is what lets several shapefiles display at once
    private static MapContent mapContent;
    private static JMapFrame fenMap;

    public static void main(String args[]) throws IOException {
        Test me = new Test();
        me.run();
    }

    /** Sets up the frame, then keeps loading files until the user cancels. */
    public void run() throws IOException {
        fenMap = new JMapFrame();
        mapContent = new MapContent();
        fenMap.setMapContent(mapContent);
        fenMap.enableToolBar(true);
        fenMap.setMinimumSize(new Dimension(300, 300));
        fenMap.setVisible(true);
        int i = 0;
        while (chercheAfficheVecteur(i)) {
            i++;
            i = i % color.length;
        }
    }

    /**
     * Prompts for a shapefile and adds it as a styled layer.
     *
     * @param next index into the colour palette for this layer
     * @return false when the user cancelled the chooser (stop loading)
     */
    public boolean chercheAfficheVecteur(int next) throws IOException {
        File file = JFileDataStoreChooser.showOpenFile("shp", null);
        if (file == null) {
            return false;
        }
        FileDataStore store = FileDataStoreFinder.getDataStore(file);
        FeatureSource featureSource = store.getFeatureSource();
        // (the original contained an empty "if (mapContext == null) {}" block
        // here -- dead code left over from the earlier variant; removed)
        Style style = SLD.createSimpleStyle(featureSource.getSchema(), color[next]);
        Layer layer = new FeatureLayer(featureSource, style);
        mapContent.addLayer(layer);
        return true;
    }
}

Only one of the two AutoCompleteTextView are showing suggestions

I have two AutoCompleteTextView controls on the same page: ACTV1 and ACTV2, and only one (ACTV1) is showing suggestions from my database. For each data-binding action I've made a separate Java class: ACTV1.java and ACTV2.java.
But if I am adding an intent filter (MAIN, LAUNCHER) in my manifest file for ACTV2.java class and setting in run configuration ACTV2.java as Launch Action then I won't get suggestions anymore for ACTV1 control but this time I'll get suggestions for ACTV2 control.
The two Java classes are identical except for the names of some constants/controls.
package com.fishing2;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.message.BasicNameValuePair;
import org.json.JSONArray;
import org.json.JSONObject;
import android.app.Activity;
import android.os.Bundle;
import android.text.Editable;
import android.text.TextWatcher;
import android.util.Log;
import android.widget.ArrayAdapter;
import android.widget.AutoCompleteTextView;
public class CompleteBalti extends Activity {
//private CustomAutoCompleteView CompleteBalti;
private ArrayAdapter<String> adaperbalti;
#Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_partida);
}
final TextWatcher textChecker = new TextWatcher() {
public void afterTextChanged(Editable s) {}
public void beforeTextChanged(CharSequence s, int start, int count, int after) { }
public void onTextChanged(CharSequence s, int start, int before, int count)
{
adaperbalti.clear();
callPHP1();
}
};
private void callPHP1(){
String result = "";
InputStream is=null;
AutoCompleteTextView CompleteBalti = (AutoCompleteTextView) findViewById(R.id.nume_localitate);
ArrayList<NameValuePair> nameValuePairs = new ArrayList<NameValuePair>();
nameValuePairs.add(new BasicNameValuePair("st",CompleteBalti.getText().toString()));
{
try{
HttpClient httpclient = new DefaultHttpClient();
HttpPost httppost = new HttpPost("http://192.168.3.159/wtf/balti.php");
httppost.setEntity(new UrlEncodedFormEntity(nameValuePairs,"utf-8"));
HttpResponse response = httpclient.execute(httppost);
HttpEntity entity = response.getEntity();
is = entity.getContent();
BufferedReader reader = new BufferedReader(new InputStreamReader(is,"utf-8"),8);
StringBuilder sb = new StringBuilder();
String line = null;
while ((line = reader.readLine()) != null) {
sb.append(line + "\n");
}
is.close();
result=sb.toString();
result = result.substring(1);
}catch(Exception e){
Log.e("log_tag", "Error in http connection "+e.toString());
}
try{
JSONArray jArray = new JSONArray(result);
JSONObject json_data = null;
for (int i=0;i<jArray.length(); i++)
{
json_data = jArray.getJSONObject(i);
adaperbalti.add(json_data.getString("nume_balta"));
}
} catch(Exception e1){
Log.e("log_tag", "Error converting result "+e1.toString());
}
}
}
}