Hi guys, how can I collect the packet length for each packet in a pcap file? Thanks a lot.
I suggest a high-tech method that very few people know about: reading the documentation.
man pcap tells us there are actually two different lengths available:
caplen  a bpf_u_int32 giving the number of bytes of the packet that are
        available from the capture

len     a bpf_u_int32 giving the length of the packet, in bytes (which
        might be more than the number of bytes available from the capture,
        if the length of the packet is larger than the maximum number of
        bytes to capture)
An example in C:
/* Grab a packet */
packet = pcap_next(handle, &header);
if (packet == NULL) {   /* End of file */
    break;
}
printf("Got a packet with length of [%d]\n", header.len);
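If you need the length of every packet in the file rather than just the first one, here is a minimal sketch using pcap_open_offline() and pcap_next_ex(); the file name capture.pcap is just a placeholder:

#include <pcap.h>
#include <stdio.h>

int main(void)
{
    char errbuf[PCAP_ERRBUF_SIZE];
    struct pcap_pkthdr *header;
    const u_char *data;

    /* Open the capture file for offline reading */
    pcap_t *handle = pcap_open_offline("capture.pcap", errbuf);
    if (handle == NULL) {
        fprintf(stderr, "pcap_open_offline failed: %s\n", errbuf);
        return 1;
    }

    /* pcap_next_ex() returns 1 per packet and -2 at the end of a savefile */
    while (pcap_next_ex(handle, &header, &data) == 1) {
        printf("caplen=%u len=%u\n", header->caplen, header->len);
    }

    pcap_close(handle);
    return 0;
}

Both values come from the per-packet struct pcap_pkthdr, so you can sum or store them as you iterate.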
Another one in Python with the pcapy library:
import pcapy

reader = pcapy.open_offline("packets.pcap")
while True:
    try:
        (header, payload) = reader.next()
        print("Got a packet of length %d" % header.getlen())
    except pcapy.PcapError:
        break
The two examples below work fine: one in C using WinPcap (compiled with CL, Microsoft Visual C), and one in Python using Scapy.
I wrote this function (in C) to get the packet size and it works fine.
Don't forget to include pcap.h and define HAVE_REMOTE in the compiler's preprocessor settings.
u_int getpkt_size(char * pcapfile){
pcap_t *indesc;
char errbuf[PCAP_ERRBUF_SIZE];
char source[PCAP_BUF_SIZE];
u_int res;
struct pcap_pkthdr *pktheader;
const u_char *pktdata;
u_int pktsize=0;
/* Create the source string according to the new WinPcap syntax */
if ( pcap_createsrcstr( source, // variable that will keep the source string
PCAP_SRC_FILE, // we want to open a file
NULL, // remote host
NULL, // port on the remote host
pcapfile, // name of the file we want to open
errbuf // error buffer
) != 0)
{
fprintf(stderr,"\nError creating a source string\n");
return 0;
}
/* Open the capture file */
if ( (indesc= pcap_open(source, 65536, PCAP_OPENFLAG_PROMISCUOUS, 1000, NULL, errbuf) ) == NULL)
{
fprintf(stderr,"\nUnable to open the file %s.\n", source);
return 0;
}
/* get the first packet*/
res=pcap_next_ex( indesc, &pktheader, &pktdata);
if (res != 1) {
    printf("\nError reading the pcap file");
    pcap_close(indesc);
    return 0;
}
/* Get the packet size*/
pktsize=pktheader->len;
/* Close the input file */
pcap_close(indesc);
return pktsize;
}
Another working example in Python, using the wonderful Scapy:
from scapy.all import *

pkts = rdpcap("data.pcap", 1)  # reading only 1 packet from the file
OnePkt = pkts[0]
print(len(OnePkt))  # prints the length of the packet
I have been trying to receive and process packets from a tunnel. There are separate blocks for processing v4 and v6 packets; if a packet does not fall under either of the categories, it is dropped. In my case, every packet is being dropped during execution. When I used Wireshark to capture the packets from the tunnel, I noticed a difference in packet size, i.e., the length of the packet. For example, the length of a received packet in Wireshark is 60, whereas the program prints 64. I see a 4-byte difference on every packet. I am unable to find out what I am doing wrong here. Would anyone help me? I have also attached a screenshot of Wireshark and the program output for reference.
Image: Captured packets from tunnel through wireshark and program
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

#define MTU 1600
void processPacket(const uint8_t *packet, const size_t len) {
//1st octet identifies the IP version
uint8_t version = (*packet) >> 4;
//...
printf("IP version - %d\n", version);
if (version == 4 ) {
//ipv4 packet process ...
} else if (version == 6) {
//ipv6 packet process ...
} else {
//drop packet
printf("Unknown IP version, drop packet\n");
}
}
int main() {
struct ifreq ifr;
int fd, err;
uint8_t *buffer = (uint8_t *)(malloc(MTU));
ssize_t len;
if ( (fd = open("/dev/net/tun", O_RDWR)) == -1 ) {
perror("Unable to open /dev/net/tun");
exit(EXIT_FAILURE);
}
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TUN;
strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
if ( (err = ioctl(fd, TUNSETIFF, (void *) &ifr)) == -1 ) {
perror("Error encountered during ioctl TUNSETIFF");
close(fd);
exit(EXIT_FAILURE);
}
printf("Device tun0 opened\n");
while(1) {
len = read(fd, buffer, MTU);
printf("Read %lu bytes from tun0\n", len);
processPacket(buffer, len);
}
printf("\nPress any key to exit...");
getchar();
close(fd);
}
The tunnel device pre-pends the IP packet with additional information, so the first byte is not the IP version. If you don't need it, you can add IFF_NO_PI to ifr_flags. See kernel documentation.
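If it helps, here is a minimal sketch of the IFF_NO_PI variant (the helper name open_tun_no_pi is purely illustrative). Alternatively, you can keep IFF_TUN as it is and simply skip sizeof(struct tun_pi) (4 bytes) at the start of each buffer before calling processPacket().

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <unistd.h>

/* Open a tun device without the packet-information prefix, so the
 * first byte returned by read() is the IP version nibble. */
static int open_tun_no_pi(const char *name)
{
    struct ifreq ifr;
    int fd = open("/dev/net/tun", O_RDWR);
    if (fd == -1)
        return -1;

    memset(&ifr, 0, sizeof(ifr));
    ifr.ifr_flags = IFF_TUN | IFF_NO_PI;   /* IFF_NO_PI drops struct tun_pi */
    strncpy(ifr.ifr_name, name, IFNAMSIZ);

    if (ioctl(fd, TUNSETIFF, &ifr) == -1) {
        close(fd);
        return -1;
    }
    return fd;
}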
I have a problem with an SD card. I'm using the FatFs library, version R0.10b, to access it.
My code:
// .... //
FATFS fatfs;
FIL plik;
FRESULT fresult,res1,res2,res3,res4,res5;
UINT zapisanych_bajtow = 0 , br;
UINT zapianie_bajtow = 0;
char * buffor = "123456789abcdef\r\n";
unsigned short int i;
void main(void) {
// ... //
res1 = f_mount(0,&fatfs); // returns FA_OK
res2 = f_open( &plik, "f721.txt", FA_OPEN_ALWAYS | FA_WRITE ); // returns FA_OK
if( res2 == FR_OK )
{
res3 = f_write( &plik, ( const void * ) buffor, 17, &zapisanych_bajtow ); // returns FR_DISK_ERR
}
res4 = f_close( &plik );// returns FR_DISK_ERR
for(;;)
{
}
}
Any idea what might be wrong?
I had a similar error, with just one difference: I tried to write 4096 bytes at once with f_write, and it always returned FR_DISK_ERR.
This was caused by trying to write more than the size of the IO buffer in the FIL structure of FatFs (defined in ff.h).
typedef struct {
FATFS* fs; /* Pointer to the related file system object (**do not change order**) */
WORD id; /* Owner file system mount ID (**do not change order**) */
BYTE flag; /* Status flags */
BYTE err; /* Abort flag (error code) */
DWORD fptr; /* File read/write pointer (Zeroed on file open) */
DWORD fsize; /* File size */
DWORD sclust; /* File start cluster (0:no cluster chain, always 0 when fsize is 0) */
DWORD clust; /* Current cluster of fpter (not valid when fprt is 0) */
DWORD dsect; /* Sector number appearing in buf[] (0:invalid) */
DWORD dir_sect; /* Sector number containing the directory entry */
BYTE* dir_ptr; /* Pointer to the directory entry in the win[] */
DWORD* cltbl; /* Pointer to the cluster link map table (Nulled on file open) */
UINT lockid; /* File lock ID origin from 1 (index of file semaphore table Files[]) */
BYTE buf[_MAX_SS]; /* File private data read/write window */
} FIL;
The last array, buf[_MAX_SS], is the file IO buffer. _MAX_SS is a user-defined parameter (defined in ff.h), so you can either decrease the number of bytes written at once (see the sketch below) or change the _MAX_SS value.
I know this is not your case because you only write 17 bytes at once, but this may be helpful for others.
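If someone does need to push more data than that in one go, here is a small sketch of writing a large buffer in chunks instead of a single big f_write() call; the 512-byte chunk size and the helper name are just for illustration:

#include "ff.h"

/* Write `total` bytes from `data` in small chunks instead of one big f_write(). */
FRESULT write_in_chunks(FIL *fp, const BYTE *data, UINT total)
{
    UINT written;
    FRESULT res = FR_OK;

    while (total > 0) {
        UINT chunk = (total > 512) ? 512 : total;
        res = f_write(fp, data, chunk, &written);
        if (res != FR_OK || written != chunk)
            break;              /* disk error, or volume full if written < chunk */
        data  += written;
        total -= written;
    }
    return res;
}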
It's been a few years since I finished with the TMS, but maybe this will help you:
FA_OPEN_ALWAYS Opens the file if it is existing. If not, a new file is created.
To append data to the file, use f_lseek() function after file open in this method.
If the file does not exist, use:
FA_CREATE_NEW Creates a new file. The function fails
with FR_EXIST if the file is existing.
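A sketch of the FA_OPEN_ALWAYS + f_lseek() append pattern mentioned above (the path, the data and the helper name are only examples; error handling kept minimal):

#include "ff.h"

/* Open (or create) a file and append a block of data at its end. */
FRESULT append_data(const char *path, const void *data, UINT len)
{
    FIL fil;
    UINT written;
    FRESULT res;

    /* FA_OPEN_ALWAYS opens an existing file or creates a new one;
     * the read/write pointer starts at 0, so seek to the end to append. */
    res = f_open(&fil, path, FA_OPEN_ALWAYS | FA_WRITE);
    if (res != FR_OK)
        return res;

    res = f_lseek(&fil, f_size(&fil));      /* move to the end of the file */
    if (res == FR_OK)
        res = f_write(&fil, data, len, &written);

    f_close(&fil);
    return res;
}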
I had the same issue with an implementation of Chan's FatFs on the MSP430: I always received an FR_DISK_ERR result when calling disk_write().
The reason for my issue was the following:
The operation failed on the xmit_datablock() call; it returned 0.
xmit_datablock() failed because xmit_spi_multi() failed.
xmit_spi_multi() failed because it was not enough to just transmit the bytes from the buffer; it was necessary to read from RXBUF after every write.
Here is how it looks after the issue was fixed:
/* Block SPI transfers */
static void xmit_spi_multi (
const BYTE* buff, /* Data to be sent */
UINT cnt /* Number of bytes to send */
)
{
do {
volatile char x;
UCA1TXBUF= *buff++; while(! (UCA1IFG & UCRXIFG)) ; x = UCA1RXBUF;
UCA1TXBUF= *buff++; while(! (UCA1IFG & UCRXIFG)) ; x = UCA1RXBUF;
} while (cnt -= 2);
}
Before fixing the issue there was no read from UCA1RXBUF following every write to UCA1TXBUF.
After fixing xmit_spi_multi() my issue with FR_DISK_ERR in disk_write() was solved.
I want to send a binary file from the client to the server using sockets.
This is my code for reading the file:
size_t buffer_size = 512;
std::fstream myfile;
myfile.open(filePath[i], std::ios::in | std::ios::out |
std::ios::binary);
std::vector<char> myBuffer(512);
std::ostream_iterator<char> osIt(std::cout);
//std::ifstream myfile(filePath[i]);
while (myfile)
{
myfile.read(&myBuffer.front(), buffer_size);
if (!myfile)
{
myBuffer.resize(myfile.gcount());
}
std::string msg(myBuffer.begin(), myBuffer.end());
doLog("sending message");
sout << locker << "\n " << msg.c_str() << unlocker;
q_.enQ(msg);
::Sleep(10 * id()); // sleep time increases with each additional Sender
if (pSt->status() == "bad")
break;
}
I am using a blocking queue and reading the file block by block, with each block of size 512.
This is my code for saving file at the server side:
msg = q_.deQ();
std::ofstream myStream;
myStream.open("../Test/Client1/Received_Files/darkKnight.jpeg",
std::ios::out | std::ios::app | std::ios::binary);
myStream << msg;
if (msg == "stop\n"){
myStream.close();
}
I am not able to open the output image that is created.
String is not a container for binary data. Just read and write bytes.
I wouldn't use the stream classes for this, or indeed anything above read(), write(), send(), and recv(), unless you have a proper sockets class library available.
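For the sending side, a minimal sketch of that raw-byte approach, assuming a POSIX environment and an already-connected socket descriptor (sock_fd and the path are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Copy a file to a connected socket, one small block at a time,
 * looping over partial send()s. */
int send_file(int sock_fd, const char *path)
{
    char buf[512];
    ssize_t n;

    int fd = open(path, O_RDONLY);
    if (fd == -1) {
        perror("open");
        return -1;
    }

    while ((n = read(fd, buf, sizeof(buf))) > 0) {
        ssize_t off = 0;
        while (off < n) {
            ssize_t sent = send(sock_fd, buf + off, (size_t)(n - off), 0);
            if (sent == -1) {
                perror("send");
                close(fd);
                return -1;
            }
            off += sent;
        }
    }

    close(fd);
    return (n == 0) ? 0 : -1;   /* n == 0 means EOF was reached cleanly */
}

The receiver does the mirror image: recv() into a buffer and write the bytes straight to the output file until the connection is closed.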
I have a simple TCP client/server program working well with strings and character data. I wanted to take each frame (from a webcam) and send it to the server. Here is the part of the client program where the error happens:
line:66 if(send(sock, frame, sizeof(frame), 0)< 0)
error:
client.cpp:66:39: error: cannot convert ‘cv::Mat’ to ‘const void*’ for argument ‘2’ to ‘ssize_t send(int, const void*, size_t, int)
I can't figure out this error. Kindly help. The complete client program follows:
#include<stdio.h>
#include<sys/types.h>
#include<sys/socket.h>
#include<netinet/in.h>
#include<string.h>
#include<stdlib.h>
#include<netdb.h>
#include<unistd.h>
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc,char *argv[])
{
int sock;
struct sockaddr_in server;
struct hostent *hp;
char buff[1024];
VideoCapture capture;
Mat frame;
capture.open( 1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
begin:
capture.read(frame);
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
goto end;
}
sock=socket(AF_INET,SOCK_STREAM,0);
if(sock<0)
{
perror("socket failed");
exit(1);
}
server.sin_family =AF_INET;
hp= gethostbyname(argv[1]);
if(hp == 0)
{
perror("get hostname failed");
close(sock);
exit(1);
}
memcpy(&server.sin_addr,hp->h_addr,hp->h_length);
server.sin_port = htons(5000);
if(connect(sock,(struct sockaddr *) &server, sizeof(server))<0)
{
perror("connect failed");
close(sock);
exit(1);
}
int c = waitKey(30);
if( (char)c == 27 ) { goto end; }
if(send(sock, frame, sizeof(frame), 0)< 0)
{
perror("send failed");
close(sock);
exit(1);
}
goto begin;
end:
printf("sent\n",);
close(sock);
return 0;
}
Because TCP provides a stream of bytes, before you can send something over a TCP socket you must compose the exact bytes you want to send. Your use of sizeof is incorrect: the sizeof operator tells you how many bytes are needed on your system to store the particular type. This bears no relationship to the number of bytes the data will require over the TCP connection, which depends on the particular protocol you are layering on top of TCP; that protocol must specify how the data is to be sent at the byte level.
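For illustration, here is a minimal sketch of one such byte-level convention: a hypothetical 4-byte length prefix in network byte order followed by the payload bytes. The helper names are made up; the point is only that you decide the exact bytes and send those.

#include <arpa/inet.h>   /* htonl */
#include <stddef.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Send exactly `len` bytes, looping over partial send()s. */
static int send_all(int sock, const void *data, size_t len)
{
    const char *p = (const char *) data;
    while (len > 0) {
        ssize_t n = send(sock, p, len, 0);
        if (n <= 0)
            return -1;
        p += n;
        len -= (size_t) n;
    }
    return 0;
}

/* One possible framing: 4-byte big-endian length, then the payload. */
static int send_frame(int sock, const void *payload, uint32_t len)
{
    uint32_t netlen = htonl(len);
    if (send_all(sock, &netlen, sizeof(netlen)) == -1)
        return -1;
    return send_all(sock, payload, len);
}

The receiver reads the 4-byte length first, then keeps calling recv() until that many payload bytes have arrived.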
Like David already said, you got the length wrong. sizeof() won't help; what you want is probably
frame.total() * frame.channels()
You can't send a Mat object, but you can send the pixels (the data pointer), so this would be:
send(sock, frame.data,frame.total() * frame.channels(), 0)
But it's still a bad idea to send uncompressed pixels over the network. Please look at imencode/imdecode.
I'm pretty sure you got the client/server roles in reverse here. Usually the server holds the information to retrieve (the webcam), and the client connects to it and requests an image.
I'm using zlib to perform gzip compression. zlib writes the data directly to an open TCP socket after compressing it.
/* socket_fd is a file descriptor for an open TCP socket */
gzFile gzf = gzdopen(socket_fd, "wb");
int uncompressed_bytes_consumed = gzwrite(gzf, buffer, 1024);
(of course all error handling is removed)
The question is: how do you determine how many bytes were written to the socket? All the gz* functions in zlib deal with byte counts/offsets in the uncompressed domain, and tell (seek) doesn't work for sockets.
The zlib.h header says "This library can optionally read and write gzip streams in memory as well." Writing to a buffer would work (then I can write the buffer to the socket subsequently), but I can't see how to do that with the interface.
You'll be able to do this with the deflate* series of calls. I'm not going to show you everything, but this example program (which I had named "test.c" in my directory) should help you get started:
#include <zlib.h>
#include <stdlib.h>
#include <stdio.h>
char InputBufferA[4096];
char OutputBufferA[4096];
int main(int argc, char *argv[])
{
z_stream Stream;
int InputSize;
FILE *FileP;
Stream.zalloc = Z_NULL;   /* use zlib's default memory allocation */
Stream.zfree = Z_NULL;
Stream.opaque = Z_NULL;
/* initialize compression */
deflateInit(&Stream, 3);
FileP = fopen("test.c", "rb");
InputSize = fread((void *) InputBufferA, 1, sizeof(InputBufferA), FileP);
fclose(FileP);
Stream.next_in = (Bytef *) InputBufferA;
Stream.avail_in = InputSize;
Stream.next_out = (Bytef *) OutputBufferA;
Stream.avail_out = sizeof(OutputBufferA);
deflate(&Stream, Z_SYNC_FLUSH);
/* OutputBufferA is now filled in with the compressed data. */
printf("%d bytes input compressed to %d bytes\n", Stream.total_in, Stream.total_out);
exit(0);
}
Consult the deflate documentation from zlib.h.
zlib can, in fact, write gzip formatted data to a buffer in memory.
This zlib faq entry defers to comments in zlib.h. In the header file, the comment for deflateInit2() mentions that you should (arbitrarily?) add 16 to the 4th parameter (windowBits) in order to cause the library to format the deflate stream with the gzip format (instead of the default "zlib" format).
This code gets the zlib state set up properly to encode gzip to a buffer:
#include <zlib.h>
z_stream stream;
stream.zalloc = Z_NULL;
stream.zfree = Z_NULL;
stream.opaque = Z_NULL;
int level = Z_DEFAULT_COMPRESSION;
int method = Z_DEFLATED; /* mandatory */
int windowBits = 15 + 16; /* 15 is default as if deflateInit */
/* were used, add 16 to enable gzip format */
int memLevel = 8; /* default */
int strategy = Z_DEFAULT_STRATEGY;
if(deflateInit2(&stream, level, method, windowBits, memLevel, strategy) != Z_OK)
{
fprintf(stderr, "deflateInit failed\n");
exit(EXIT_FAILURE);
}
/* now use the deflate function as usual to gzip compress */
/* from one buffer to another. */
I confirmed that this procedure yields the exact same binary output as the gzopen/gzwrite/gzclose interface.
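To tie this back to the original byte-counting question, here is a sketch (with error handling omitted) that runs deflate() on the stream initialized above, compressing one input buffer into a memory buffer; the return value is the number of compressed bytes sitting in the output buffer, which is exactly the count you would pass to send():

#include <stddef.h>
#include <zlib.h>

/* Compress `in_len` bytes from `in` into `out` (capacity `out_cap`) using a
 * z_stream already set up with deflateInit2() as shown above. Returns the
 * number of compressed bytes now available in `out`. */
size_t gzip_to_buffer(z_stream *strm, const unsigned char *in, size_t in_len,
                      unsigned char *out, size_t out_cap)
{
    strm->next_in = (Bytef *) in;
    strm->avail_in = (uInt) in_len;
    strm->next_out = out;
    strm->avail_out = (uInt) out_cap;

    /* Z_SYNC_FLUSH pushes out everything produced so far without ending the
     * stream; use Z_FINISH on the last chunk to terminate the gzip stream. */
    deflate(strm, Z_SYNC_FLUSH);

    return out_cap - strm->avail_out;   /* bytes actually written to `out` */
}

The cumulative counter strm->total_out carries the same information across calls, so the per-call difference of total_out is another way to get the byte count.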