How to get the number of bytes received and sent in Cascades (BlackBerry 10)

I'm new to Cascades programming. I'm trying to develop an app that reports the number of bytes received and sent.
I'm using the QNetworkSession methods bytesReceived() and bytesWritten() for this, but I'm getting errors.
Any help would be appreciated.
Thanks
Code:
#include "Monitor.hpp"
#include <bb/cascades/Application>
#include <bb/cascades/QmlDocument>
#include <bb/cascades/AbstractPane>
#include <QtCore/QFile>
#include <QtNetwork/QNetworkSession>
#include <QtNetwork/QNetworkConfiguration>
#include <QtNetwork/QNetworkConfigurationManager>
using namespace bb::cascades;
QNetworkSession::QNetworkSession ( const QNetworkConfiguration & connectionConfig, QObject * parent = 0 ); // error: a constructor signature copied from the docs is not valid C++ at file scope
Monitor::Monitor(bb::cascades::Application *app)
: QObject(app)
{
QNetworkSession qnetworksession; // error: QNetworkSession has no default constructor, and this local is not visible to Received()/Sent()
// create scene document from main.qml asset
// set parent to created document to ensure it exists for the whole application lifetime
QmlDocument *qml = QmlDocument::create("asset:///main.qml").parent(this);
qml->setContextProperty("app", this);
// create root object for the UI
AbstractPane *root = qml->createRootObject<AbstractPane>();
// set created root object as a scene
app->setScene(root);
}
quint64 Monitor::Received() const
{
quint64 rec = qnetworksession.bytesReceived(); // error: qnetworksession is not a member of Monitor
return rec;
}
quint64 Monitor::Sent() const
{
quint64 sen = qnetworksession.bytesWritten(); // error: qnetworksession is not a member of Monitor
return sen;
}
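A minimal sketch of a version that should compile, assuming the BlackBerry 10 Bearer Management API behaves like stock Qt: drop the stray constructor declaration, make the session a class member declared in Monitor.hpp, and construct it from the default network configuration. The member names m_manager and m_session are my own.
// Hedged sketch -- assumes stock Qt Bearer Management behavior on BB10.
// In Monitor.hpp, declare (in this order, so m_manager is built first):
//     QNetworkConfigurationManager m_manager;
//     QNetworkSession m_session;
Monitor::Monitor(bb::cascades::Application *app)
: QObject(app),
m_session(m_manager.defaultConfiguration()) // QNetworkSession has no default constructor
{
m_session.open(); // the byte counters are only meaningful once the session is open
// ... QML setup as before ...
}
quint64 Monitor::Received() const
{
return m_session.bytesReceived();
}
quint64 Monitor::Sent() const
{
return m_session.bytesWritten();
}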

Related

jxcore crashes at JX_CreateEmptyObject in a sub-instance creation thread

I am currently working on embedding jxcore in my robotics system program. While testing the embedding API, I wrote the following code:
#include "stdafx.h"
#include "ScriptEngine.h"
#include <windows.h>
#include "thread"
#include "mutex"
int main(int argc, char **args) {
char* homeFolder = args[0];
JX_InitializeOnce(homeFolder);
JX_InitializeNewEngine();
JX_DefineMainFile("console.log('parent engine')");
JXValue obj;
// Parent engine created at main thread ,and JX_CreateEmptyObject worked ok!
JX_CreateEmptyObject(&obj);
JXValue global;
JX_GetGlobalObject(&global);
JX_SetNamedProperty(&global, "NativeBridge", &obj);
JX_Free(&obj);
JX_Free(&global);
JX_StartEngine();
//create a new engine instance and attach it to a new thread
thread t1 (create_new_engine);
t1.join();
Sleep(10 * 1000);
}
void create_new_engine() {
string homeFolder = "";
JX_InitializeNewEngine();
JX_DefineMainFile("console.log('sub engine')");
JX_StartEngine();
JXValue obj1;
// sub engine created at new thread ,and JX_CreateEmptyObject called fail!
//Exception thrown: read access violation.
//__imp_TlsGetValue(...) returned nullptr.
//program break at :
//JXCORE_EXTERN(bool)
//JX_CreateEmptyObject(JXValue *value) {
// node::commons *com = node::commons::getInstance();
JX_CreateEmptyObject(&obj1);
JXValue global;
JX_GetGlobalObject(&global);
JX_SetNamedProperty(&global, "NativeBridge", &obj1);
JX_Free(&obj1);
JX_Free(&global);
JX_Loop();
JX_StopEngine();
}
My working environment is Windows 10 with Visual Studio 2015, running in VMware Fusion on a Mac.
Thanks.

Linux protocol handler to get packets and the processes associated with them

Hi Linux networking experts,
I am trying to build a tool that monitors all sockets created by each process and the bandwidth used by each process. I could poll that information from /proc, but I would miss short-lived sockets that are created and destroyed between poll cycles.
The idea was to create a kernel module that registers a protocol handler with the networking subsystem, so that my handler function is called for each packet received. In the handler I wanted to look up the socket associated with the sk_buff, and the process that opened the socket. To get the processes waiting on the socket, I walk the socket's wait queue and check the tasks in the list. I wrote this:
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/datalink.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/list.h>
#include <linux/ip.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xxx");
MODULE_AUTHOR("xxxx");
int prot_handler(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
static struct packet_type handler_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_IP),
.func = prot_handler,
};
int
prot_handler(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
const struct iphdr *iph;
const struct tcphdr *th;
struct sock *sk;
struct socket_wq *wq;
wait_queue_head_t *q;
struct task_struct * task;
//printk(KERN_ALERT "Got sk_buff.\n");
iph = ip_hdr(skb);
th = tcp_hdr(skb);
sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
iph->saddr, th->source,
iph->daddr, ntohs(th->dest),
skb->skb_iif);
/* __inet_lookup_skb is crashing. It might be because skb_steal_sock?
*
* __inet_lookup_skb:
* skb_steal_sock
* __inet_lookup
* __inet_lookup_established
* __inet_lookup_listener
*/
if (!sk)
return 0;
//printk(KERN_ALERT "Found active sock.\n");
// code mimics sock_def_readable
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
q = &wq->wait;
if (wq_has_sleeper(wq)) {
// code mimics __wake_up_common
wait_queue_t *curr, *next;
list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
task = curr->private;
if (task && task->pid != 0)
printk(KERN_ALERT "Got packet for process ID: %d\n", task->pid);
}
}
rcu_read_unlock();
return 0;
}
static int __init
dev_init(void) {
printk(KERN_ALERT "Registering protocol handler with network stack.\n");
dev_add_pack(&handler_packet_type);
return 0;
}
static void __exit
dev_exit(void) {
printk(KERN_ALERT "Removing protocol handler.\n");
dev_remove_pack(&handler_packet_type);
}
module_init(dev_init);
module_exit(dev_exit);
I loaded this module and started an SSH session to the system to test it. The handler gets called when I type something on the remote system, but the printed PID doesn't correspond to anything I expect, and the handler doesn't always get called. I think there might be a race condition with ip_rcv.
Apr 22 10:20:56 ol71node1 kernel: Got packet for process ID: 13927307
Apr 22 10:20:56 ol71node1 kernel: Got packet for process ID: 13927307
Apr 22 10:20:56 ol71node1 kernel: Got packet for process ID: 13927307
Can someone point me toward how I could do this, even if the use case doesn't make a lot of sense?
Thanks in advance.
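A hedged guess at the bogus PIDs: an entry's private pointer only holds a task_struct when the entry was queued with the default wake function. Processes sleeping in poll/select or epoll install custom wake callbacks whose private field points at their own bookkeeping structures, so casting it to a task_struct yields garbage such as the 13927307 above. A minimal sketch of a guard for the loop body (treating default_wake_function as the filter is my assumption):
list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
/* Only trust curr->private as a task_struct for entries using the
 * default wake function; poll/epoll entries store other pointers there. */
if (curr->func == default_wake_function) {
task = curr->private;
if (task && task->pid != 0)
printk(KERN_ALERT "Got packet for process ID: %d\n", task->pid);
}
}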

CodeBlocks throwing exception c0000005 APPCRASH on C++ code

Hello everyone, first question here. I get a lot of help from reading your responses, and now I have an issue that is getting the best of me.
I have a simple program:
#include <iostream>
#include <ctime>
#include <stdlib.h>
#include "room.h"
#include "area1.h"
using namespace std;
void game_engine();
int main()
{
game_engine();
cout << "The end" << endl;
return 0;
}
void game_engine()
{
area1 nw;
nw.welcome();
};
I also have an ADT base class called room and a child class called area1.
#include <iostream>
#include <ctime>
#include <stdlib.h>
#include "room.h"
#include "area1.h"
using namespace std;
area1::area1()
{
this->description = "You are now in the North-West corner of the island. You have water to the North and the West, and this area is hard to navigate because of all the vegetation. I hope you find something that you need. ";
this->name = "NORTHWEST";
this->odds = 25;
this->random = 100;
this->visited = false;
}
void area1::welcome()
{
cout << name << endl;
cout << description << endl;
}
void area1::treasure()
{
}
void area1::navigate()
{
}
area1::~area1()
{
delete north;
delete east;
delete west;
delete south;
}
What I don't understand is why it crashes when all I am doing is calling a simple function from main that calls a function in my area1 class. No parameters are passed, and the console output from the function calls is correct, but it crashes before returning to main to print "The end". I have done similar things without error quite often, so this one is driving me nuts. Any help is appreciated.
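The crash pattern fits the destructor: ~area1() deletes north, east, west, and south, but the constructor shown never initializes them, so they hold garbage addresses and deleting them is undefined behavior. That fires when nw goes out of scope at the end of game_engine(), which is exactly "before it returns to main". A minimal sketch of the usual fix, assuming the four pointers are members declared in room.h:
area1::area1()
{
// Initialize the neighbor pointers; deleting a null pointer is a safe no-op.
north = nullptr;
east = nullptr;
west = nullptr;
south = nullptr;
description = "..."; // unchanged from the original
name = "NORTHWEST";
odds = 25;
random = 100;
visited = false;
}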

MATLAB Engine API runs forever if code is separated

I'm trying to access MATLAB through the Engine API from MetaTrader Terminal 4 (trading software from MetaQuotes).
I use a DLL file to communicate between MetaTrader and MATLAB.
If I put this code in a single function, it works, but if I separate it into two distinct functions, it runs forever or bugs out:
#include "stdafx.h"
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <vector>
#include <windows.h>
#include <memory.h>
#include "engine.h"
using namespace std;
#define MT4_EXPFUNC __declspec(dllexport)
Engine *pEng = NULL;
mxArray *closev = NULL;
double *closevp = NULL;
mxArray *getPArray = NULL;
MT4_EXPFUNC void InitEngine( int dummy )
{
pEng = engOpen( NULL );
}
MT4_EXPFUNC void InitCloseBuffer( int size )
{
closev = mxCreateDoubleMatrix( 1, size, mxREAL );
if ( closev != NULL )
closevp = mxGetPr( closev );
// ------------------------------------------------------------------
// the following code bugs when separated from the code above
// and put in another function called right after this one
engPutVariable( pEng, "closev", closev );
engEvalString( pEng, "[mainNet] = PTrainInit();" );
engEvalString( pEng, "[hitrate, mainNet] = PTrain(mainNet, closev);" );
engEvalString( pEng, "outGetP = PGetPrediction(mainNet, closev)" );
getPArray = engGetVariable( pEng, "outGetP" );
double *p;
if ( getPArray != NULL )
p = mxGetPr( getPArray );
// end of the separated code
// -----------------------------------------------------------------
}
I need to separate the two functions, though, because I need to fill closev with values.
What am I doing wrong?
thanks
Jeff
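For reference, a minimal sketch of the intended split; RunPrediction is my own name for the hypothetical second export that MetaTrader would call after filling the buffer through closevp:
MT4_EXPFUNC void RunPrediction( int dummy )
{
if ( pEng == NULL || closev == NULL )
return; // engine or buffer not initialized yet
engPutVariable( pEng, "closev", closev );
engEvalString( pEng, "[mainNet] = PTrainInit();" );
engEvalString( pEng, "[hitrate, mainNet] = PTrain(mainNet, closev);" );
engEvalString( pEng, "outGetP = PGetPrediction(mainNet, closev)" );
getPArray = engGetVariable( pEng, "outGetP" );
}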
OK, I found the answer: I had initially built a standalone MATLAB project that worked fine, but there closev was column-wise, while in the DLL closev is row-wise. Also, populating closev and then calling the second, separated function exercised the MATLAB functions quite differently than calling them with closev unpopulated. Those two things together made the errors look strange.
Glad I could make it work.
JfLB
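For anyone hitting the same thing, the orientation comes from how the mxArray is created; a hedged illustration using the poster's buffer:
// Row vector -- what the DLL above creates:
closev = mxCreateDoubleMatrix( 1, size, mxREAL );
// Column vector -- what the standalone MATLAB project expected:
closev = mxCreateDoubleMatrix( size, 1, mxREAL );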

C++ AMP GPU data isn't freed after deleting a dynamically allocated array

#include "stdafx.h"
#include <amp.h>
#include <assert.h>
#include <iostream>
#include <vector>
#include <iostream>
#include <ppl.h>
#include<stdio.h>
#include<conio.h>
using namespace ::Concurrency;
using std::vector;
static array<double, 1> *P_GPU;
int _tmain(int argc, _TCHAR* argv[])
{
accelerator default_device(accelerator::default_accelerator);
accelerator_view acc_v = default_device.default_view;
int N = 4*4096*4096;
double *xdata = new double[N];
memset(xdata, 0, N * sizeof(double)); // note: memset takes a byte count, not an element count
extent<1> e_b(N);
P_GPU = new array<double, 1>(e_b, acc_v); // dynamic allocation of array
array<double, 1> bb(e_b, xdata, acc_v);
array_view<double, 1> dest(*P_GPU);
dest.discard_data();
parallel_for_each(dest.extent, [=,&bb](index<1> idx) restrict(amp)
{
dest[idx]=bb[idx];
});
dest.synchronize();
std::cout << "before delete .." << std::endl;
std::cin.get();
delete [] xdata; // the DATA of xdata pointer is deleted ..
delete P_GPU; // the DATA GPU of P_GPU is not deleted ???
std::cout << "Hit any key to exit..." << std::endl;
std::cin.get();
return 0;
}
I monitored the binary with Microsoft Process Explorer v16.04 while testing dynamic allocation of an array (or array_view) in C++ AMP, and I can see that the GPU memory is not freed after deleting the P_GPU pointer. I also ran the program from MATLAB (as a mexFunction) and hit the same problem with delete P_GPU;. I need to dynamically allocate a C++ AMP GPU matrix from MATLAB; I found a way to allocate it, but I run into complications when I try to deallocate (delete) the matrix in GPU memory. How can I deallocate the GPU memory?
I just solved this problem by following "Using pointers in C++Amp": use std::shared_ptr<>.
class FrameProcessorAmpBase
{
private:
std::shared_ptr<array<float, 2>> m_frame; // note: the closing '>' was missing
public:
FrameProcessorAmpBase()
{
}
void ConfigureFrameBuffers(int width, int height)
{
m_frame = std::make_shared<array<float, 2>>(height, width); // note: stray ')' removed
}
};
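With the shared_ptr in place, a hedged sketch of a member function that could be added to the class to free the GPU buffer deterministically (ReleaseFrameBuffers is my own name, and it would need declaring in the class): dropping the last reference destroys the concurrency::array, which releases its GPU allocation.
void FrameProcessorAmpBase::ReleaseFrameBuffers()
{
// Dropping the last shared_ptr reference destroys the array,
// and with it the GPU memory it owns.
m_frame.reset();
}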