How to enable Bootchart in Android init

My current progress is as follows.
Enable BOOTCHART in bootchart.h:
#ifndef BOOTCHART
# define BOOTCHART 1
#endif
After that I compiled the code with make INIT_BOOTCHART=true kernel -j4. Once the build completed, I did:
1. adb root
2. adb shell
3. cd data
4. echo 120 > bootchart-start (cat bootchart-start -> 120)
5. mkdir bootchart
6. reboot bootloader
7. fastboot flash kernel kernel.img
8. reboot
Then I checked for the log files inside the /data/bootchart folder, but it is still empty even after a few minutes.
So I added some logging to bootchart_init_action in system/core/init/init.c:
static int bootchart_init_action(int nargs, char **args)
{
    ERROR("#### JACH #### - BOOTCHART'%d':'%s'\n", 0, "bootchart_init_action -start");
    bootchart_count = bootchart_init();
    ERROR("#### JACH #### - BOOTCHART'%d':'%s'\n", bootchart_count, "bootchart_init_action -bootchart_count");
    if (bootchart_count < 0) {
        ERROR("bootcharting init failure\n");
    } else if (bootchart_count > 0) {
        NOTICE("bootcharting started (period=%d ms)\n", bootchart_count*BOOTCHART_POLLING_MS);
    } else {
        NOTICE("bootcharting ignored\n");
    }
    ERROR("#### JACH #### - BOOTCHART'%d':'%s'\n", 0, "bootchart_init_action -end");
    return 0;
}
After compiling the above code, I saw that bootchart_init() returns 0. Below is the relevant log output:
<11>[ 2.814551] init: #### JACH #### - BOOTCHART'0':'bootchart_init_action -start'
<11>[ 2.814591] init: #### JACH #### - bootchart_init'0':'start'
<11>[ 2.814704] init: #### JACH #### - BOOTCHART'0':'bootchart_init_action -bootchart_count'
<11>[ 2.814721] init: #### JACH #### - BOOTCHART'0':'bootchart_init_action -end'
So bootchart_init() is as follows:
#define LOG_STARTFILE "/data/bootchart-start"

int bootchart_init( void )
{
    ERROR("#### JACH #### - bootchart_init'%d':'%s'\n", 0, "start");
    int ret;
    char buff[4];
    int timeout = 0, count = 0;

    buff[0] = 0;
    proc_read( LOG_STARTFILE, buff, sizeof(buff) );
    if (buff[0] != 0) {
        timeout = atoi(buff);
        ERROR("#### JACH #### - bootchart_init'%d':'%s'\n", timeout, "timeout");
    }
    else {
        /* when running with emulator, androidboot.bootchart=<timeout>
         * might be passed by as kernel parameters to specify the bootchart
         * timeout. this is useful when using -wipe-data since the /data
         * partition is fresh
         */
        char cmdline[1024];
        char* s;
#define KERNEL_OPTION "androidboot.bootchart="
        proc_read( "/proc/cmdline", cmdline, sizeof(cmdline) );
        s = strstr(cmdline, KERNEL_OPTION);
        if (s) {
            s += sizeof(KERNEL_OPTION)-1;
            timeout = atoi(s);
        }
    }
    if (timeout == 0)
        return 0;

    if (timeout > BOOTCHART_MAX_TIME_SEC)
        timeout = BOOTCHART_MAX_TIME_SEC;

    count = (timeout*1000 + BOOTCHART_POLLING_MS-1)/BOOTCHART_POLLING_MS;

    do { ret = mkdir(LOG_ROOT, 0755); } while (ret < 0 && errno == EINTR);

    file_buff_open(log_stat,  LOG_STAT);
    file_buff_open(log_procs, LOG_PROCS);
    file_buff_open(log_disks, LOG_DISK);

    /* create kernel process accounting file */
    {
        int fd = open( LOG_ACCT, O_WRONLY|O_CREAT|O_TRUNC, 0644);
        if (fd >= 0) {
            close(fd);
            acct( LOG_ACCT );
        }
    }

    log_header();
    ERROR("#### JACH #### - bootchart_init'%d':'%s'\n", 0, "end");
    return count;
}
My guess is that when this code runs during boot, proc_read( LOG_STARTFILE, buff, sizeof(buff) ) has no permission to read the file, or the /data partition has not been created/mounted yet. I also tried hard-coding the timeout; in that case bootchart_step() does run, but the log files are still not written to the /data/bootchart folder.
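To confirm that guess, one option is to log why the open() inside proc_read() fails at that point in boot. Below is a minimal sketch of a debug-only variant (the name proc_read_dbg is made up for illustration; it assumes init's ERROR macro and mirrors the shape of the original proc_read):

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Debug-only variant of proc_read: same behaviour, but logs why open() failed,
 * e.g. ENOENT if /data is not mounted yet, EACCES for a permission problem. */
static int proc_read_dbg(const char *filename, char *buff, size_t buffsize)
{
    int len = 0;
    int fd  = open(filename, O_RDONLY);

    if (fd < 0) {
        ERROR("#### JACH #### - proc_read_dbg: open(%s) failed: %s\n",
              filename, strerror(errno));
    } else {
        len = read(fd, buff, buffsize - 1);
        close(fd);
    }
    buff[len > 0 ? len : 0] = 0;
    return len;
}

If this reports ENOENT (or reads 0 bytes), then /data/bootchart-start simply is not visible when init runs bootchart_init(), which would explain timeout staying 0 and the function returning 0.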
Still, I don't know how to fix this problem. Thanks for looking into my question.

Related

UDP packets are dropped in OS (asio is used)

I am developing a client-server application which transfers data via UDP.
I am facing the problem of dropped packets. I added a socket buffer check to detect potential overflow. My app also checks the sequence numbers carried in the packets, which have a fixed size. If the free space in the socket buffer drops below a threshold (the size of 3 packets, for example), a "Critical level of buffer" message is logged. If a packet number is skipped in the sequence, a corresponding message is logged. Here is the code:
UdpServer::UdpServer(asio::io_service& io, uint16_t port, uint32_t packetSize) :
    CommunicationBase(io, port),
    m_socket(io, asio::ip::udp::endpoint(asio::ip::address_v6::any(), m_port))
{
    m_buffer = new uint8_t[packetSize];
    m_packetSize = packetSize;
    m_socketBufferSize = m_packetSize * 32;
    m_criticalLevel = 5 * m_packetSize;
    asio::ip::udp::socket::receive_buffer_size receiveBuffSize(m_socketBufferSize);
    m_socket.set_option(receiveBuffSize);
}

UdpServer::~UdpServer()
{
    delete[] m_buffer;  // allocated with new[], so use delete[] rather than std::free
}

void UdpServer::StartReceive(std::function<void(uint8_t* buffer, uint32_t bytesCount)> receiveHandler)
{
    m_onReceive = receiveHandler;
    Receive();
}

inline void UdpServer::Receive()
{
    m_socket.async_receive(asio::null_buffers(), [=](const boost::system::error_code& error, size_t bytesCount)
    {
        OnReceive(bytesCount, error);
    });
}
void UdpServer::OnReceive(size_t bytesCount, const boost::system::error_code& error)
{
    static uint16_t lastSendNum = 65535;
    uint16_t currentNum = 0;
    uint16_t diff = 0;
    if (error)
    {
        if (error == asio::error::operation_aborted)
        {
            logtrace << "UDP socket reports operation aborted, terminating";
            return;
        }
        logerror << "UDP socket error (ignoring): " << error.message();
    }
    else
    {
        asio::ip::udp::endpoint from;
        boost::system::error_code receiveError;
        size_t bytesRead = 0;
        size_t bytesAvailable = m_socket.available();
        while (bytesAvailable > 0)
        {
            if (m_socketBufferSize - bytesAvailable < m_criticalLevel)
            {
                logwarning << "Critical buffer level!";
            }
            bytesRead = m_socket.receive(asio::buffer(m_buffer, m_packetSize), 0, receiveError);
            if (receiveError)
            {
                logerror << "UDP socket error: " << receiveError.message();
                break;
            }
            currentNum = *reinterpret_cast<uint16_t*>(m_buffer);
            diff = currentNum - lastSendNum;
            if (diff != 1)
            {
                logdebug << "Chunk skipped: " << diff << ". Last " << lastSendNum << " next " << currentNum;
            }
            lastSendNum = currentNum;
            if (m_onReceive)
            {
                m_onReceive(m_buffer, bytesRead);
            }
            bytesAvailable = m_socket.available();
        }
    }
    Receive();
}
Even when the buffer status check and the m_onReceive packet processing are disabled, and bytesAvailable > 0 is replaced with true, UDP packets are still dropped. The data rate is ~71 Mb/s over 1 Gb Ethernet.
Windows 10 is used. I also checked the netstat -s output: there are no reassembly failures, and the socket buffer never seems to overflow.
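One thing worth verifying is how large the receive buffer actually is after set_option: the OS may cap or round the requested value, and at ~71 Mb/s a buffer of only 32 packets leaves very little headroom against scheduling jitter. Below is a minimal POSIX-style C sketch of requesting a larger SO_RCVBUF and reading back what the kernel actually granted (the 4 MB figure is just an example; Winsock uses the same option names, and asio exposes the same check through get_option with asio::socket_base::receive_buffer_size):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET6, SOCK_DGRAM, 0);
    if (fd < 0) { perror("socket"); return 1; }

    int requested = 4 * 1024 * 1024;               /* example value: ask for 4 MB */
    if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &requested, sizeof(requested)) < 0)
        perror("setsockopt(SO_RCVBUF)");

    int actual = 0;
    socklen_t len = sizeof(actual);
    if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &actual, &len) == 0)
        printf("requested %d bytes, kernel granted %d bytes\n", requested, actual);

    close(fd);
    return 0;
}

If the granted size is much smaller than requested, drops can happen inside the network stack during a single scheduling delay; comparing the granted size against the observed burst rate is a useful first check.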

Linux TCP socket timestamping option

Quoting from this online kernel doc:
SO_TIMESTAMPING
Generates timestamps on reception, transmission or both. Supports
multiple timestamp sources, including hardware. Supports generating
timestamps for stream sockets.
Linux supports TCP timestamping, and I tried to write some demo code to get a timestamp for a TCP packet.
The server code is as follows:
//Bind
if( bind(socket_desc,(struct sockaddr *)&server , sizeof(server)) < 0)
{
    perror("bind failed. Error");
    return 1;
}
puts("bind done");

//Listen
listen(socket_desc , 3);

//Accept an incoming connection
puts("Waiting for incoming connections...");
int c = sizeof(struct sockaddr_in);
client_sock = accept(socket_desc, (struct sockaddr *)&client, (socklen_t*)&c);
if (client_sock < 0)
{
    perror("accept failed");
    return 1;
}

// Note: I am trying to get a software timestamp only here..
int oval = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
int olen = sizeof( oval );
if ( setsockopt( client_sock, SOL_SOCKET, SO_TIMESTAMPING, &oval, olen ) < 0 )
    { perror( "setsockopt TIMESTAMP"); exit(1); }
puts("Connection accepted");

char buf[] = "----------------------------------------";
int len = strlen( buf );
struct iovec myiov[1] = { {buf, len } };
unsigned char cbuf[ 40 ] = { 0 };
int clen = sizeof( cbuf );

struct msghdr mymsghdr = { 0 };
mymsghdr.msg_name = NULL;
mymsghdr.msg_namelen = 0;
mymsghdr.msg_iov = myiov;
mymsghdr.msg_iovlen = 1;
mymsghdr.msg_control = cbuf;
mymsghdr.msg_controllen = clen;
mymsghdr.msg_flags = 0;

int read_size = recvmsg( client_sock, &mymsghdr, 0);
if (read_size == 0)
{
    puts("Client disconnected");
    fflush(stdout);
}
else if (read_size == -1)
{
    perror("recv failed");
}
else
{
    struct msghdr *msgp = &mymsghdr;
    printf("msg received: %s \n", (char*)msgp->msg_iov[0].iov_base); // This line is successfully hit.
    // Additional info: printing msgp->msg_controllen inside gdb shows 0.
    struct cmsghdr *cmsg;
    for ( cmsg = CMSG_FIRSTHDR( msgp );
          cmsg != NULL;
          cmsg = CMSG_NXTHDR( msgp, cmsg ) )
    {
        printf("Time GOT!\n"); // <-- This line is not hit.
        if (( cmsg->cmsg_level == SOL_SOCKET )
            && ( cmsg->cmsg_type == SO_TIMESTAMPING ))
            printf("TIME GOT2\n"); // <-- of course, this line is not hit
    }
}
Any ideas why no timestamping is available here? Thanks.
Solution
I am able to get the software timestamp along with the hardware timestamp using Onload with a Solarflare NIC.
I still have no idea how to get the software timestamp alone.
The link you gave, in the comments at the end, says:
I've discovered why it doesn't work. SIOCGSTAMP only works for UDP
packets or RAW sockets, but does not work for TCP. – Gio Mar 17 '16 at 9:33
it doesn't make sense to ask for timestamps for TCP, because there's
no direct correlation between arriving packets and data becoming
available. If you really want timestamps for TCP you'll have to use
RAW sockets and implement your own TCP stack (or use a userspace TCP
library). – ecatmur Jul 4 '16 at 10:39
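For reference, when the kernel does attach a receive timestamp it arrives as an SCM_TIMESTAMPING control message whose payload is three struct timespec values (software, legacy, hardware). The sketch below is only an illustration of how to parse that cmsg once recvmsg() has filled msg_control as in the question's code; the names print_rx_timestamps and rx_timestamps are made up, it assumes the same headers/environment as the question, and it does not guarantee that a TCP read will actually carry such a message:

#include <stdio.h>
#include <sys/socket.h>
#include <time.h>

#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING SO_TIMESTAMPING   /* same numeric value in the kernel headers */
#endif

/* Layout of the SCM_TIMESTAMPING payload; matches struct scm_timestamping
 * from <linux/errqueue.h>: ts[0] software, ts[1] legacy, ts[2] hardware. */
struct rx_timestamps {
    struct timespec ts[3];
};

/* Walk the control messages attached by recvmsg() (as in the question's
 * mymsghdr) and print any receive timestamps found. */
static void print_rx_timestamps(struct msghdr *msgp)
{
    struct cmsghdr *cmsg;

    for (cmsg = CMSG_FIRSTHDR(msgp); cmsg != NULL; cmsg = CMSG_NXTHDR(msgp, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_TIMESTAMPING) {
            struct rx_timestamps *ts = (struct rx_timestamps *)CMSG_DATA(cmsg);
            printf("software rx timestamp: %lld.%09ld\n",
                   (long long)ts->ts[0].tv_sec, ts->ts[0].tv_nsec);
            printf("hardware rx timestamp: %lld.%09ld\n",
                   (long long)ts->ts[2].tv_sec, ts->ts[2].tv_nsec);
        }
    }
}

Since SCM_TIMESTAMPING has the same numeric value as SO_TIMESTAMPING, the check in the question's loop would also match if a control message were present; the fact that msg_controllen stays 0 suggests the kernel never attached one for that read.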

setsockopt() get EBADF in mmaped netlink

I'm trying to use memory-mapped I/O netlink to transfer bulk packets from kernel to user space, and I followed a guide document from Patrick McHardy [1]. However, when I try to set up the shared ring buffer in user space by using:
setsockopt(sock_fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
setsockopt(sock_fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));
Both calls return -1, and errno is 1, which I took to mean the descriptor is invalid. I'm confused about that because I also referred to many other source codes and they can set up the ring successfully.
My code is almost the same as Patrick's [1]:
int sock_fd = -1;
sock_fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_DECODE);
if (sock_fd < 0)
    return -1;
bind(sock_fd, (struct sockaddr *)&src_addr, sizeof(src_addr));

/* init the mmap buffer */
unsigned int block_size = 16 * getpagesize();
struct nl_mmap_req req = {
    .nm_block_size = block_size,
    .nm_block_nr   = 64,
    .nm_frame_size = 16384,
    .nm_frame_nr   = 64 * block_size / 16384,
};

/* Configure ring parameters */
if (setsockopt(sock_fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0) {
    if (errno > 0)
        printf("%d\n", errno);
}
if (setsockopt(sock_fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0) {
    if (errno > 0)
        printf("%d", errno);
    exit(1);
}
This code is built on Ubuntu 14.04, and the kernel version is 3.13.0-74-generic.
Any ideas about it? Thanks a lot.
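As a first debugging step it helps to print the symbolic error instead of the raw number: on Linux, errno 1 is EPERM (operation not permitted) rather than EBADF (which is 9), and for this setsockopt that usually points at a permissions problem (the netlink ring setup is commonly restricted to processes with CAP_NET_ADMIN), whereas a kernel built without CONFIG_NETLINK_MMAP would typically reject the option with ENOPROTOOPT. Both causes are assumptions to verify, not confirmed here. A minimal helper sketch (report_sockopt_failure is a made-up name):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: report a failed setsockopt() call with its symbolic errno,
 * e.g. report_sockopt_failure("NETLINK_RX_RING"); */
static void report_sockopt_failure(const char *option_name)
{
    fprintf(stderr, "setsockopt(%s) failed: errno=%d (%s)\n",
            option_name, errno, strerror(errno));
}

Calling this in the two failure branches above instead of printing the bare number would show, for example, "setsockopt(NETLINK_RX_RING) failed: errno=1 (Operation not permitted)", making it clear whether to retry with CAP_NET_ADMIN/root or to check the kernel configuration.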

modbus_read_register - Error connection timed out

We are using the libmodbus library to read register values from an EM6400 energy meter, which supports Modbus over RTU. We are facing the following two issues.
1) We are facing an issue with the modbus_read_registers API: it returns -1 and the error message is:
ERROR Connection timed out: select.
After debugging the library, we found that this issue is due to the echo of the request bytes in the response message.
The read() call in _modbus_rtu_recv returns the request bytes first, followed by the response bytes. As a result, length_to_read is calculated in compute_data_length_after_meta() based on the request bytes instead of the response bytes (which contain the number of bytes read), and the connection timed out error occurs.
We tried both libmodbus versions 3.0.6 and 3.1.2, but the same issue occurs in both.
2) modbus_rtu_set_serial_mode (ctx, MODBUS_RTU_RS485) returns "BAD file descriptor".
Please confirm whether any API call is missing or any parameter is not set correctly.
Our sample code to read the register value is as follows.
int main()
{
    modbus_t *ctx;
    uint16_t tab_reg[2] = {0, 0};
    float avgVLL = -1;
    int res = 0;
    int rc;
    int i;
    struct timeval response_timeout;
    uint32_t tv_sec = 0;
    uint32_t tv_usec = 0;
    response_timeout.tv_sec = 5;
    response_timeout.tv_usec = 0;

    ctx = modbus_new_rtu("/dev/ttyUSB0", 19200, 'E', 8, 1);
    if (NULL == ctx)
    {
        printf("Unable to create libmodbus context\n");
        res = 1;
    }
    else
    {
        printf("created libmodbus context\n");
        modbus_set_debug(ctx, TRUE);
        //modbus_set_error_recovery(ctx, MODBUS_ERROR_RECOVERY_LINK | MODBUS_ERROR_RECOVERY_PROTOCOL);

        rc = modbus_set_slave(ctx, 1);
        printf("modbus_set_slave return: %d\n", rc);
        if (rc != 0)
        {
            printf("modbus_set_slave: %s \n", modbus_strerror(errno));
        }

        /* Commented - Giving 'Bad File Descriptor' issue
        rc = modbus_rtu_set_serial_mode(ctx, MODBUS_RTU_RS485);
        printf("modbus_rtu_set_serial_mode: %d \n", rc);
        if (rc != 0)
        {
            printf("modbus_rtu_set_serial_mode: %s \n", modbus_strerror(errno));
        }
        */

        // This code is for version 3.0.6
        modbus_get_response_timeout(ctx, &response_timeout);
        printf("Default response timeout:%ld sec %ld usec \n", response_timeout.tv_sec, response_timeout.tv_usec);
        response_timeout.tv_sec = 60;
        response_timeout.tv_usec = 0;
        modbus_set_response_timeout(ctx, &response_timeout);
        modbus_get_response_timeout(ctx, &response_timeout);
        printf("Set response timeout:%ld sec %ld usec \n", response_timeout.tv_sec, response_timeout.tv_usec);

        /* This code is for version 3.1.2
        modbus_get_response_timeout(ctx, &tv_sec, &tv_usec);
        printf("Default response timeout:%d sec %d usec \n", tv_sec, tv_usec);
        tv_sec = 60;
        tv_usec = 0;
        modbus_set_response_timeout(ctx, tv_sec, tv_usec);
        modbus_get_response_timeout(ctx, &tv_sec, &tv_usec);
        printf("Set response timeout:%d sec %d usec \n", tv_sec, tv_usec);
        */

        rc = modbus_connect(ctx);
        printf("modbus_connect: %d \n", rc);
        if (rc == -1) {
            printf("Connection failed: %s\n", modbus_strerror(errno));
            res = 1;
        }

        rc = modbus_read_registers(ctx, 3908, 2, tab_reg);
        printf("modbus_read_registers: %d \n", rc);
        if (rc == -1) {
            printf("Read registers failed: %s\n", modbus_strerror(errno));
            res = 1;
        }

        for (i = 0; i < 2; i++) {
            printf("reg[%d]=%d (0x%X)\n", i, tab_reg[i], tab_reg[i]);
        }

        avgVLL = modbus_get_float(tab_reg);
        printf("Average Line to Line Voltage = %f\n", avgVLL);

        modbus_close(ctx);
        modbus_free(ctx);
    }
    return res;
}
Output of this sample is as follows:
created libmodbus context
modbus_set_slave return: 0
modbus_rtu_set_serial_mode: -1
modbus_rtu_set_serial_mode: Bad file descriptor
Default response timeout:0 sec 500000 usec
Set response timeout:60 sec 0 usec
Opening /dev/ttyUSB0 at 19200 bauds (E, 8, 1)
modbus_connect: 0
[01][03][0F][44][00][02][87][0A]
Waiting for a confirmation...
ERROR Connection timed out: select
<01><03><0F><44><00><02><87><0A><01><03><04><C4><5F><43><D4><C6><7E>modbus_read_registers: -1
Read registers failed: Connection timed out
reg[0]=0 (0x0)
reg[1]=0 (0x0)
Average Line to Line Voltage = 0.000000
Issue 1) is probably a hardware issue, with "local echo" enabled in your RS-485 adapter. Local echo is sometimes used to confirm sending of data bytes on the bus. You need to disable it, or find another RS-485 adapter.
I have written about this in the documentation of my MinimalModbus Python library: Local Echo
It lists a few common ways to disable local echo in RS-485 adapters.
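If the adapter has no switch or driver setting for local echo, one way to confirm it is to bypass libmodbus entirely: open the port raw, write a few arbitrary bytes while no slave is answering, and see whether the same bytes come straight back. A minimal standalone sketch, with the device path and 19200 8E1 settings taken from the question (this is an illustrative test, not part of libmodbus):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
    const char *dev = "/dev/ttyUSB0";              /* from the question */
    int fd = open(dev, O_RDWR | O_NOCTTY | O_NONBLOCK);
    if (fd < 0) { perror("open"); return 1; }
    fcntl(fd, F_SETFL, 0);                         /* back to blocking, timed reads below */

    struct termios tio;
    if (tcgetattr(fd, &tio) < 0) { perror("tcgetattr"); return 1; }
    cfmakeraw(&tio);
    cfsetispeed(&tio, B19200);
    cfsetospeed(&tio, B19200);
    tio.c_cflag |= CREAD | CLOCAL | PARENB;        /* 19200 8E1, ignore modem lines */
    tio.c_cflag &= ~(PARODD | CSTOPB);
    tio.c_cc[VMIN]  = 0;                           /* timed reads */
    tio.c_cc[VTIME] = 5;                           /* 0.5 s per read() */
    if (tcsetattr(fd, TCSANOW, &tio) < 0) { perror("tcsetattr"); return 1; }

    const unsigned char probe[] = { 0xAA, 0x55, 0xAA, 0x55 };
    if (write(fd, probe, sizeof(probe)) != (ssize_t)sizeof(probe)) {
        perror("write");
        return 1;
    }
    tcdrain(fd);

    unsigned char back[sizeof(probe)];
    size_t got = 0;
    while (got < sizeof(back)) {
        ssize_t n = read(fd, back + got, sizeof(back) - got);
        if (n <= 0)                                /* 0 = timeout, <0 = error */
            break;
        got += (size_t)n;
    }

    if (got == sizeof(probe) && memcmp(back, probe, sizeof(probe)) == 0)
        printf("Adapter echoes its own transmission: local echo is enabled.\n");
    else
        printf("No complete echo detected (read %zu byte(s)).\n", got);

    close(fd);
    return 0;
}

Separately, regarding issue 2: in libmodbus, modbus_rtu_set_serial_mode() operates on the open serial file descriptor, so it can only succeed after modbus_connect(); calling it before connecting, as in the commented-out block above, typically fails with "Bad file descriptor", and even after connecting it needs a serial driver that supports the kernel RS-485 ioctl.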

For Linux 3.10, what changes do I need to make to get netmap/virtio_net working?

The netmap/virtio_net driver didn't work (Linux 3.10 kernel). There were two problems.
On the 3.10.60 kernel from kernel.org, the patch to virtio_net.c didn't apply cleanly; one part of the patch was rejected. This is easily fixed.
More serious was that the virtio initialization code didn't work, nor did the packet receive code. The basic problem was a failure to initialize the indices properly and a failure to maintain a one-slot separation between the head/tail indices. (The same problem appears in two locations in the code.)
This problem is easily seen by creating a KVM guest with a
netmap/virtio_net driver, and simply pinging the guest from the host.
The receive traffic can easily be monitored using the pkt-gen tool on
the guest.
The first 255 pings work fine; when the index hits 255, packet receive fails, and it continues to fail every time on slot 255.
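The failure at exactly slot 255 is the classic full-versus-empty ambiguity of a ring whose indices are compared directly: once every slot is used, the head wraps back onto the tail and the ring looks empty again, which is why one slot is normally kept unused (the one-slot separation mentioned above). A tiny standalone illustration of the index arithmetic, using the same wrap rule as netmap's nm_next() (this is not netmap code, just the principle):

#include <stdio.h>

#define RING_SLOTS 256
#define LIM        (RING_SLOTS - 1)

/* Same wrap rule as netmap's nm_next(): advance by one and wrap at the last slot. */
static unsigned nm_next_slot(unsigned i, unsigned lim)
{
    return (i == lim) ? 0 : i + 1;
}

int main(void)
{
    unsigned head = 0, tail = 0;   /* producer writes at head, consumer reads at tail */
    unsigned produced;

    /* Keeping one slot free: the ring counts as full when advancing head would hit tail. */
    for (produced = 0; nm_next_slot(head, LIM) != tail; produced++)
        head = nm_next_slot(head, LIM);
    printf("with a one-slot gap the ring holds %u of %u slots (full and empty stay distinct)\n",
           produced, RING_SLOTS);

    /* Without the gap: filling all 256 slots wraps head back onto tail, and
     * head == tail is indistinguishable from an empty ring. */
    head = tail = 0;
    for (produced = 0; produced < RING_SLOTS; produced++)
        head = nm_next_slot(head, LIM);
    printf("after filling every slot, head == tail again: %s\n",
           head == tail ? "yes - the ring looks empty at slot 255" : "no");

    return 0;
}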
I've included patches for both problems in the hopes that the source code
will be updated and others won't have to find these problems.
First virtio_netmap_3.10.60.patch:
# patch is the whole netmap virtio driver patch for 3.10.60 (from
# kernel.org), and it applies correctly.
#
Index: linux-3.10.60/drivers/net/virtio_net.c
===================================================================
--- linux-3.10.60.orig/drivers/net/virtio_net.c 2014-11-14 11:48:23.000000000 -0500
+++ linux-3.10.60/drivers/net/virtio_net.c 2014-11-21 12:54:29.751760095 -0500
@@ -131,6 +131,10 @@
struct notifier_block nb;
};
+#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE)
+#include <virtio_netmap.h>
+#endif
+
struct skb_vnet_hdr {
union {
struct virtio_net_hdr hdr;
@@ -210,6 +214,10 @@
/* Suppress further interrupts. */
virtqueue_disable_cb(vq);
+#ifdef DEV_NETMAP
+ if (netmap_tx_irq(vi->dev, vq2txq(vq)))
+ return;
+#endif
/* We were probably waiting for more output buffers. */
netif_wake_subqueue(vi->dev, vq2txq(vq));
}
@@ -646,7 +654,16 @@
struct virtnet_info *vi = rq->vq->vdev->priv;
void *buf;
unsigned int r, len, received = 0;
+#ifdef DEV_NETMAP
+ int work_done = 0;
+
+ if (netmap_rx_irq(vi->dev, vq2rxq(rq->vq), &work_done)) {
+ napi_complete(napi);
+ ND("called netmap_rx_irq");
+ return 1;
+ }
+#endif
again:
while (received < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
@@ -679,6 +696,16 @@
{
struct virtnet_info *vi = netdev_priv(dev);
int i;
+#ifdef DEV_NETMAP
+ int ok = virtio_netmap_init_buffers(vi);
+
+ netmap_enable_all_rings(dev);
+ if (ok) {
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_enable(&vi->rq[i]);
+ return 0;
+ }
+#endif
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
@@ -972,6 +999,9 @@
struct virtnet_info *vi = netdev_priv(dev);
int i;
+#ifdef DEV_NETMAP
+ netmap_disable_all_rings(dev);
+#endif
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
@@ -1644,6 +1674,10 @@
goto free_recv_bufs;
}
+#ifdef DEV_NETMAP
+ virtio_netmap_attach(vi);
+#endif
+
/* Assume link up if device can't report link status,
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@ -1690,6 +1724,9 @@
{
struct virtnet_info *vi = vdev->priv;
+#ifdef DEV_NETMAP
+ netmap_detach(vi->dev);
+#endif
unregister_hotcpu_notifier(&vi->nb);
/* Prevent config work handler from accessing the device. */
Next is the virtio_netmap.patch
# There is a problem with the initialization, and with the control of the
# indices during packet receive.
#
# This problem is easily seen by building a KVM netmap/virtio_net driver, and
# simply pinging it (host pings KVM guest). All goes well, until ring buffer
# reaches index 255, and no packet is actually received. This will fix that
# problem and resulted in a working driver.
#
Index: b/LINUX/virtio_netmap.h
===================================================================
--- a/LINUX/virtio_netmap.h 2014-11-21 16:26:03.951278021 -0500
+++ b/LINUX/virtio_netmap.h 2014-11-21 16:26:25.451386665 -0500
@@ -398,8 +398,8 @@
* Second part: skip past packets that userspace has released.
*/
nm_i = kring->nr_hwcur; /* netmap ring index */
- if (nm_i != head) {
- for (n = 0; nm_i != head; n++) {
+ if (nm_next(nm_i, lim) != head) {
+ for (n = 0; nm_next(nm_i, lim) != head; n++) {
struct netmap_slot *slot = &ring->slot[nm_i];
void *addr = NMB(slot);
int err;
@@ -421,7 +421,7 @@
virtqueue_kick(vq);
nm_i = nm_next(nm_i, lim);
}
- kring->nr_hwcur = head;
+ kring->nr_hwcur = nm_i;
}
/* We have finished processing used RX buffers, so we have to tell
@@ -454,6 +454,7 @@
for (r = 0; r < na->num_rx_rings; r++) {
COMPAT_DECL_SG
struct netmap_ring *ring = na->rx_rings[r].ring;
+ struct netmap_kring *kring = &na->rx_rings[r];
struct virtqueue *vq = GET_RX_VQ(vi, r);
struct scatterlist *sg = GET_RX_SG(vi, r);
struct netmap_slot* slot;
@@ -485,6 +486,7 @@
if (VQ_FULL(vq, err))
break;
}
+ kring->nr_hwcur = i;
D("added %d inbufs on queue %d", i, r);
virtqueue_kick(vq);
}