My code:
SecKeyRef oPublicKey = [self getPublicKeyRef];
SecKeyRef oPrivateKey = [self getPrivateKeyRef];
CFDictionaryRef myDictionary;
CFTypeRef keys[2];
CFTypeRef values[2];
// Initialize dictionary of key params
keys[0] = kSecAttrKeyType;
values[0] = kSecAttrKeyTypeRSA;
keys[1] = kSecAttrKeySizeInBits;
int iByteSize = 1024; // despite the name, this is the key size in bits
values[1] = CFNumberCreate( NULL, kCFNumberIntType, &iByteSize );
myDictionary = CFDictionaryCreate( NULL, keys, values, sizeof(keys) / sizeof(keys[0]), NULL, NULL );
// Generate keys
OSStatus status = SecKeyGeneratePair( myDictionary, &oPublicKey, &oPrivateKey );
if ( status != 0 )
    NSLog( @"SecKeyGeneratePair failed" );
// Encrypt some data
uint8_t* pPlainText = (uint8_t*)"6921";
uint8_t aCipherText[1024];
size_t iCipherLength = 1024;
status = SecKeyEncrypt( oPublicKey, kSecPaddingPKCS1, pPlainText, strlen( (char*)pPlainText ) + 1, &aCipherText[0], &iCipherLength );
if ( status != 0 )
    NSLog( @"SecKeyEncrypt failed" );
NSMutableData *data12 = [[NSMutableData alloc] init];
// Ciphertext is binary and may contain embedded zero bytes, so use the
// length reported by SecKeyEncrypt rather than strlen().
[data12 appendBytes:aCipherText length:iCipherLength];
NSString *string1 = [[NSString alloc] initWithData:data12 encoding:NSASCIIStringEncoding];
NSLog(@"Encrypted Text:::%@", string1);
// Decrypt the data
uint8_t aPlainText[1024];
size_t iPlainLength = 1024;
status = SecKeyDecrypt( oPrivateKey, kSecPaddingPKCS1, &aCipherText[0], iCipherLength, &aPlainText[0], &iPlainLength );
if ( status != 0 )
    NSLog( @"SecKeyDecrypt failed" );
NSLog(@"FINAL decrypted text: %s", aPlainText);
I am using this code for encryption, but we are getting output like:
N$) : )¥=wÞ¢#4+ MO, ÝÕ#2< xÕHþ a  f¦U;f£Æì ø¤VØ´ÿ ÷ påcx ¨¢¯Ô)Õ#k Ë«: çÑ5
õ°7\Ûz~9GX>M¼êeë¿w¥ro¬ gP4s2µ)9; p
But how do I get it in NSString format? Is there any wrong step in my code?
Always pad your plain text. PKCS #1 is the standard padding for RSA encryption.
Also, be aware that with RSA public key encryption, you can only encrypt/decrypt data up to the size of the key (minus a few overhead bytes).
Public key encryption is typically used only to encrypt a key (and hash), which is then used for symmetric encryption (and verification) of the actual payload.
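A minimal sketch of that hybrid pattern, using the same Security framework keys as the code above, with CommonCrypto for the symmetric part. The helper name hybridEncrypt, the zero IV, and the reduced error handling are illustrative assumptions, not a production design:

#include <CommonCrypto/CommonCryptor.h>
#include <Security/Security.h>

// Hybrid encryption sketch: RSA protects only a random AES session key;
// the payload itself is encrypted symmetrically, so its size is not
// limited by the RSA modulus.
OSStatus hybridEncrypt(SecKeyRef publicKey,
                       const uint8_t *plain, size_t plainLen,
                       uint8_t *wrappedKey, size_t *wrappedKeyLen,
                       uint8_t *cipher, size_t *cipherLen)
{
    uint8_t aesKey[kCCKeySizeAES128];

    // 1. Random session key.
    if (SecRandomCopyBytes(kSecRandomDefault, sizeof(aesKey), aesKey) != 0)
        return errSecParam;

    // 2. Symmetric encryption of the actual payload (zero IV: sketch only).
    if (CCCrypt(kCCEncrypt, kCCAlgorithmAES128, kCCOptionPKCS7Padding,
                aesKey, sizeof(aesKey), NULL,
                plain, plainLen, cipher, *cipherLen, cipherLen) != kCCSuccess)
        return errSecParam;

    // 3. RSA-encrypt just the 16-byte session key; that always fits in a
    //    1024-bit key minus the 11 bytes of PKCS #1 overhead.
    return SecKeyEncrypt(publicKey, kSecPaddingPKCS1,
                         aesKey, sizeof(aesKey),
                         wrappedKey, wrappedKeyLen);
}

The receiver would first SecKeyDecrypt the wrapped key, then CCCrypt(kCCDecrypt, ...) the payload. Note the RSA output is still raw binary; to show it as an NSString you would Base64-encode it rather than interpret the bytes as ASCII.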
We are using a Linux (Ubuntu 20.04) machine with OpenSSL 3.0.7 installed, running sample code for signing.
We followed the procedure below for signing and are getting a segmentation fault in EVP_DigestFinal_ex.
The segmentation fault happens because mdctx->algctx == 0x0. While debugging the code, we saw mdctx->algctx being set in EVP_DigestInit_ex but later freed in EVP_DigestSignInit. We are not sure what we are missing, or how to keep mdctx->algctx valid to avoid the crash.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <openssl/evp.h>
#include <openssl/provider.h>

EVP_PKEY *pkey = NULL;

void generate_key(void)
{
    EVP_PKEY_CTX *ctx = NULL;
    pkey = EVP_PKEY_new();
    ctx = EVP_PKEY_CTX_new(pkey, NULL); /* note: overwritten (and leaked) on the next line */
    ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL);
    if (!ctx)
        printf(" key gen failed");
    if (EVP_PKEY_keygen_init(ctx) <= 0)
        printf(" key gen failed");
    if (EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, 512) <= 0)
        printf(" key gen failed");
    /* Generate key */
    if (EVP_PKEY_keygen(ctx, &pkey) <= 0)
        printf(" key gen failed");
}

int main(int argc, char *argv[])
{
    EVP_MD_CTX *mdctx;
    const EVP_MD *m_md;
    const EVP_MD *md;
    EVP_PKEY *m_key;
    EVP_PKEY *ed_pkey = NULL;
    EVP_PKEY_CTX *ed_pctx = NULL;
    // OSSL_PROVIDER *default;
    size_t sign_len = 0;
    uint8_t m_sign_buf[2048];
    int ret = 0;
    char mess1[] = "Test Message\n";
    char mess2[] = "Hello World\n";
    unsigned char *outdigest = NULL;
    unsigned int md_len = 0, i;

    printf("args : %s\n", argv[1]);
    //default = OSSL_PROVIDE_load(NULL, "default");
    //md = EVP_get_digestbyname("SHA256");
    //md = EVP_sha256();
    md = EVP_MD_fetch(NULL, "SHA256", NULL);
    if (md == NULL) {
        printf("Unknown message digest %s\n", argv[1]);
        exit(1);
    }

    generate_key();
    printf("value of md %s\n", EVP_MD_get0_name(md));

    mdctx = EVP_MD_CTX_new();
    /* The context is initialised twice: EVP_DigestSignInit() frees the
       algctx that EVP_DigestInit_ex() set up. */
    if ((EVP_DigestInit_ex(mdctx, md, NULL)) != 1)
        printf("EVP_DigestInit_ex failed \n");
    if ((EVP_DigestSignInit(mdctx, NULL, md, NULL, pkey)) != 1)
        printf("EVP_DigestSignInit failed \n");
    if ((EVP_DigestSignUpdate(mdctx, mess1, strlen(mess1))) != 1)
        printf("EVP_DigestSignUpdate failed \n");
    //EVP_DigestUpdate(mdctx, mess2, strlen(mess2));
    if ((EVP_DigestSignFinal(mdctx, NULL, &sign_len)) != 1)
        printf("EVP_DigestSignFinal failed \n");
    if ((EVP_DigestSignFinal(mdctx, m_sign_buf, &sign_len)) != 1)
        printf("EVP_DigestSignFinal 2 failed \n");

    /* Allocate the output buffer */
    outdigest = OPENSSL_malloc(EVP_MD_get_size(md));
    if (outdigest == NULL)
        printf("outdigest failed \n");
    /* Crash site: by now the sign operation has replaced the digest's algctx */
    if ((EVP_DigestFinal_ex(mdctx, outdigest, &md_len)) != 1)
        printf("EVP_DigestFinal_ex failed \n");
    EVP_MD_CTX_free(mdctx);

    /* Print out the digest result */
    BIO_dump_fp(stdout, outdigest, md_len);
    exit(0);
}
Thanks,
CRASH Info:
Program received signal SIGSEGV, Segmentation fault.
0x00007ffff7d99422 in SHA256_Final (md=0x5555555a88d0 "\250UUU\005", c=0x0)
at ../openssl-3.0.7/include/crypto/md32_common.h:194
194 size_t n = c->num;
(gdb) bt
#0 0x00007ffff7d99422 in SHA256_Final (md=0x5555555a88d0 "\250UUU\005", c=0x0)
at ../openssl-3.0.7/include/crypto/md32_common.h:194
#1 0x00007ffff7e2628c in sha256_internal_final (ctx=0x0, out=0x5555555a88d0 "\250UUU\005", outl=0x7fffffffda98,
outsz=32) at ../openssl-3.0.7/providers/implementations/digests/sha2_prov.c:72
#2 0x00007ffff7cbadf6 in EVP_DigestFinal_ex (ctx=0x555555580d80, md=0x5555555a88d0 "\250UUU\005",
isize=0x7fffffffdad8) at ../openssl-3.0.7/crypto/evp/digest.c:446
#3 0x000055555555575f in main (argc=1, argv=0x7fffffffe458) at test2.c:90
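For reference, the flow documented in EVP_DigestSign(3) never mixes the two call families: EVP_DigestSignInit() performs the digest initialisation itself, and EVP_DigestSignFinal() finalises the digest, so there is no separate EVP_DigestInit_ex() or EVP_DigestFinal_ex() on the same context. A minimal sketch of that flow (sign_message is an illustrative helper name):

#include <openssl/crypto.h>
#include <openssl/evp.h>

/* One-shot signing per EVP_DigestSign(3): init, update, query the
 * signature length, allocate, then finalise. Returns 1 on success. */
int sign_message(EVP_PKEY *pkey, const unsigned char *msg, size_t msg_len,
                 unsigned char **sig, size_t *sig_len)
{
    EVP_MD_CTX *mdctx = EVP_MD_CTX_new();
    int ok = 0;

    if (mdctx == NULL)
        return 0;
    if (EVP_DigestSignInit(mdctx, NULL, EVP_sha256(), NULL, pkey) == 1
        && EVP_DigestSignUpdate(mdctx, msg, msg_len) == 1
        && EVP_DigestSignFinal(mdctx, NULL, sig_len) == 1 /* query length */
        && (*sig = OPENSSL_malloc(*sig_len)) != NULL
        && EVP_DigestSignFinal(mdctx, *sig, sig_len) == 1)
        ok = 1;
    EVP_MD_CTX_free(mdctx);
    return ok;
}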
I'm trying to get the same result as
openssl x509 -in certificate.pem -subject_hash
without using that option directly, but instead by extracting the certificate name and building the proper canonical representation to then take the SHA-1 hash from. Is this possible? Something like
openssl x509 -in certificate.pem -subject -noout -nameopt dn_rev -nameopt RFC2253 | tr '[:upper:]' '[:lower:]' | openssl dgst -sha1 -binary | xxd -p
and then truncating the hash to 4 bytes or so... So far I didn't get it to match, though.
$ openssl x509 -in .pem -subject_hash -noout
cc952886
$ openssl x509 -in certificate.pem -subject -noout -nameopt RFC2253 | tr '[:upper:]' '[:lower:]' | openssl dgst -sha1 -binary | xxd -p
0b6a015b2a7ed2a5f3695f1d46a0c20006de300a
The corresponding C code is here: https://github.com/openssl/openssl/blob/d53b437f9992f974c1623e9b9b9bdf053aefbcc3/crypto/x509/x509_cmp.c#L261
unsigned long X509_NAME_hash_ex(const X509_NAME *x, OSSL_LIB_CTX *libctx,
                                const char *propq, int *ok)
{
    unsigned long ret = 0;
    unsigned char md[SHA_DIGEST_LENGTH];
    EVP_MD *sha1 = EVP_MD_fetch(libctx, "SHA1", propq);

    /* Make sure X509_NAME structure contains valid cached encoding */
    i2d_X509_NAME(x, NULL);
    if (ok != NULL)
        *ok = 0;
    if (sha1 != NULL
        && EVP_Digest(x->canon_enc, x->canon_enclen, md, NULL, sha1, NULL)) {
        ret = (((unsigned long)md[0]) | ((unsigned long)md[1] << 8L) |
               ((unsigned long)md[2] << 16L) | ((unsigned long)md[3] << 24L)
               ) & 0xffffffffL;
        if (ok != NULL)
            *ok = 1;
    }
    EVP_MD_free(sha1);
    return ret;
}
and the canonical representation of the name is set here: https://github.com/openssl/openssl/blob/256d41d4371720ccfe1a4fead6bd28ed5071bcdd/crypto/x509/x_name.c#L303
/*
* This function generates the canonical encoding of the Name structure. In
* it all strings are converted to UTF8, leading, trailing and multiple
* spaces collapsed, converted to lower case and the leading SEQUENCE header
* removed. In future we could also normalize the UTF8 too. By doing this
* comparison of Name structures can be rapidly performed by just using
* memcmp() of the canonical encoding. By omitting the leading SEQUENCE name
* constraints of type dirName can also be checked with a simple memcmp().
*/
static int x509_name_canon(X509_NAME *a)
{
    unsigned char *p;
    STACK_OF(STACK_OF_X509_NAME_ENTRY) *intname;
    STACK_OF(X509_NAME_ENTRY) *entries = NULL;
    X509_NAME_ENTRY *entry, *tmpentry = NULL;
    int i, set = -1, ret = 0, len;

    OPENSSL_free(a->canon_enc);
    a->canon_enc = NULL;
    /* Special case: empty X509_NAME => null encoding */
    if (sk_X509_NAME_ENTRY_num(a->entries) == 0) {
        a->canon_enclen = 0;
        return 1;
    }
    intname = sk_STACK_OF_X509_NAME_ENTRY_new_null();
    if (intname == NULL) {
        ERR_raise(ERR_LIB_X509, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    for (i = 0; i < sk_X509_NAME_ENTRY_num(a->entries); i++) {
        entry = sk_X509_NAME_ENTRY_value(a->entries, i);
        if (entry->set != set) {
            entries = sk_X509_NAME_ENTRY_new_null();
            if (entries == NULL)
                goto err;
            if (!sk_STACK_OF_X509_NAME_ENTRY_push(intname, entries)) {
                sk_X509_NAME_ENTRY_free(entries);
                ERR_raise(ERR_LIB_X509, ERR_R_MALLOC_FAILURE);
                goto err;
            }
            set = entry->set;
        }
        tmpentry = X509_NAME_ENTRY_new();
        if (tmpentry == NULL) {
            ERR_raise(ERR_LIB_X509, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        tmpentry->object = OBJ_dup(entry->object);
        if (tmpentry->object == NULL) {
            ERR_raise(ERR_LIB_X509, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        if (!asn1_string_canon(tmpentry->value, entry->value))
            goto err;
        if (!sk_X509_NAME_ENTRY_push(entries, tmpentry)) {
            ERR_raise(ERR_LIB_X509, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        tmpentry = NULL;
    }

    /* Finally generate encoding */
    len = i2d_name_canon(intname, NULL);
    if (len < 0)
        goto err;
    a->canon_enclen = len;
    p = OPENSSL_malloc(a->canon_enclen);
    if (p == NULL) {
        ERR_raise(ERR_LIB_X509, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    a->canon_enc = p;
    i2d_name_canon(intname, &p);
    ret = 1;
 err:
    X509_NAME_ENTRY_free(tmpentry);
    sk_STACK_OF_X509_NAME_ENTRY_pop_free(intname,
                                         local_sk_X509_NAME_ENTRY_pop_free);
    return ret;
}
Possibly, but you need to know the object identifiers (OIDs) of all the relative distinguished name (RDN) components, e.g. CN = 2.5.4.3, and then the ASN.1 encoding. You need to rebuild the binary ASN.1 blob, take the SHA-1 digest of it, and then take the first 4 bytes of that in reverse order.
I've created a bash script which uses the nameopts to get the subject in as close to the ASN.1 encoding as possible, plus the OIDs, and then mashes all of that together: https://github.com/nimpo/hash-bash-ssl
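For cross-checking such a reconstruction, note that the X509_NAME_hash_ex() function quoted above is public API, so a small program can print the reference value to compare against (a sketch; the PEM file path comes from argv):

#include <stdio.h>
#include <openssl/pem.h>
#include <openssl/x509.h>

/* Print the same value as `openssl x509 -subject_hash` for a PEM cert:
 * SHA-1 of the cached canonical name encoding, first 4 bytes packed
 * little-endian, exactly as in the source quoted above. */
int main(int argc, char *argv[])
{
    if (argc < 2)
        return 1;
    FILE *fp = fopen(argv[1], "r");
    if (fp == NULL)
        return 1;
    X509 *cert = PEM_read_X509(fp, NULL, NULL, NULL);
    fclose(fp);
    if (cert == NULL)
        return 1;
    unsigned long h = X509_NAME_hash_ex(X509_get_subject_name(cert),
                                        NULL, NULL, NULL);
    printf("%08lx\n", h);
    X509_free(cert);
    return 0;
}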
Quoting from this online kernel doc:
SO_TIMESTAMPING
Generates timestamps on reception, transmission or both. Supports
multiple timestamp sources, including hardware. Supports generating
timestamps for stream sockets.
Linux supports TCP timestamping, and I tried to write some demo code to get a timestamp for a TCP packet.
The server code is as follows:
//Bind
if (bind(socket_desc, (struct sockaddr *)&server, sizeof(server)) < 0)
{
    perror("bind failed. Error");
    return 1;
}
puts("bind done");

//Listen
listen(socket_desc, 3);

//Accept an incoming connection
puts("Waiting for incoming connections...");
int c = sizeof(struct sockaddr_in);
client_sock = accept(socket_desc, (struct sockaddr *)&client, (socklen_t *)&c);
if (client_sock < 0)
{
    perror("accept failed");
    return 1;
}

// Note: I am trying to get software timestamps only here..
int oval = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
int olen = sizeof(oval);
if (setsockopt(client_sock, SOL_SOCKET, SO_TIMESTAMPING, &oval, olen) < 0)
{
    perror("setsockopt TIMESTAMP");
    exit(1);
}
puts("Connection accepted");

char buf[] = "----------------------------------------";
int len = strlen(buf);
struct iovec myiov[1] = { { buf, len } };
unsigned char cbuf[40] = { 0 };
int clen = sizeof(cbuf);
struct msghdr mymsghdr = { 0 };
mymsghdr.msg_name = NULL;
mymsghdr.msg_namelen = 0;
mymsghdr.msg_iov = myiov;
mymsghdr.msg_iovlen = 1;
mymsghdr.msg_control = cbuf;
mymsghdr.msg_controllen = clen;
mymsghdr.msg_flags = 0;

int read_size = recvmsg(client_sock, &mymsghdr, 0);
if (read_size == 0)
{
    puts("Client disconnected");
    fflush(stdout);
}
else if (read_size == -1)
{
    perror("recv failed");
}
else
{
    struct msghdr *msgp = &mymsghdr;
    printf("msg received: %s \n", (char *)msgp->msg_iov[0].iov_base); // This line is successfully hit.
    // Additional info: msgp->msg_controllen printed inside gdb is 0.
    struct cmsghdr *cmsg;
    for (cmsg = CMSG_FIRSTHDR(msgp);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(msgp, cmsg))
    {
        printf("Time GOT!\n"); // <-- This line is not hit.
        if ((cmsg->cmsg_level == SOL_SOCKET)
            && (cmsg->cmsg_type == SO_TIMESTAMPING))
            printf("TIME GOT2\n"); // <-- of course, this line is not hit
    }
}
Any ideas why no timestamp is available here? Thanks
Solution
I am able to get the software timestamp along with the hardware timestamp using Onload with a Solarflare NIC.
Still no idea how to get the software timestamp alone.
The link you gave, in the comments at the end, says:

I've discovered why it doesn't work. SIOCGSTAMP only works for UDP packets or RAW sockets, but does not work for TCP. – Gio Mar 17 '16 at 9:33

it doesn't make sense to ask for timestamps for TCP, because there's no direct correlation between arriving packets and data becoming available. If you really want timestamps for TCP you'll have to use RAW sockets and implement your own TCP stack (or use a userspace TCP library). – ecatmur Jul 4 '16 at 10:39
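Where SO_TIMESTAMPING does deliver stamps (e.g. per-datagram on a UDP socket), the control message arrives as SCM_TIMESTAMPING carrying a struct scm_timestamping from <linux/errqueue.h>. A minimal sketch of the receive side (recv_with_timestamp is an illustrative helper; sock is assumed to be a bound UDP socket with the SO_TIMESTAMPING option already set as in the code above):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/errqueue.h>   /* struct scm_timestamping */
#include <linux/net_tstamp.h> /* SOF_TIMESTAMPING_* flags */

/* Read one datagram and print its RX software timestamp, if present.
 * ts[0] holds the software stamp; ts[2] would hold a hardware stamp. */
static void recv_with_timestamp(int sock)
{
    char data[1500];
    char ctrl[CMSG_SPACE(sizeof(struct scm_timestamping))];
    struct iovec iov = { data, sizeof(data) };
    struct msghdr msg;

    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = ctrl;
    msg.msg_controllen = sizeof(ctrl);

    if (recvmsg(sock, &msg, 0) < 0)
        return;

    for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET &&
            cmsg->cmsg_type == SCM_TIMESTAMPING) {
            struct scm_timestamping *ts =
                (struct scm_timestamping *)CMSG_DATA(cmsg);
            printf("rx sw timestamp: %lld.%09ld\n",
                   (long long)ts->ts[0].tv_sec, ts->ts[0].tv_nsec);
        }
    }
}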
We are using the libmodbus library to read register values from an EM6400 energy meter, which supports Modbus over RTU. We are facing the following two issues.
1) We are facing an issue with the modbus_read_registers API: it returns -1 and the error message is:
ERROR Connection timed out: select.
After debugging the library, we found this issue is caused by an echo of the request bytes in the response message.
The read() call in _modbus_rtu_recv returns the request bytes first, followed by the response bytes. As a result, length_to_read is calculated in compute_data_length_after_meta() based on the request bytes instead of the response bytes (which contain the number of bytes read), and the connection-timed-out error occurs.
We tried both libmodbus versions 3.0.6 and 3.1.2, but the same issue occurs in both.
2) modbus_rtu_set_serial_mode(ctx, MODBUS_RTU_RS485) returns "Bad file descriptor".
Please confirm whether any API call is missing or any parameter is not set correctly.
Our sample code to read a register value is as follows.
int main()
{
    modbus_t *ctx;
    uint16_t tab_reg[2] = {0, 0};
    float avgVLL = -1;
    int res = 0;
    int rc;
    int i;
    struct timeval response_timeout;
    uint32_t tv_sec = 0;
    uint32_t tv_usec = 0;

    response_timeout.tv_sec = 5;
    response_timeout.tv_usec = 0;

    ctx = modbus_new_rtu("/dev/ttyUSB0", 19200, 'E', 8, 1);
    if (NULL == ctx)
    {
        printf("Unable to create libmodbus context\n");
        res = 1;
    }
    else
    {
        printf("created libmodbus context\n");
        modbus_set_debug(ctx, TRUE);
        //modbus_set_error_recovery(ctx, MODBUS_ERROR_RECOVERY_LINK | MODBUS_ERROR_RECOVERY_PROTOCOL);

        rc = modbus_set_slave(ctx, 1);
        printf("modbus_set_slave return: %d\n", rc);
        if (rc != 0)
        {
            printf("modbus_set_slave: %s \n", modbus_strerror(errno));
        }

        /* Commented - Giving 'Bad File Descriptor' issue
        rc = modbus_rtu_set_serial_mode(ctx, MODBUS_RTU_RS485);
        printf("modbus_rtu_set_serial_mode: %d \n", rc);
        if (rc != 0)
        {
            printf("modbus_rtu_set_serial_mode: %s \n", modbus_strerror(errno));
        }
        */

        // This code is for version 3.0.6
        modbus_get_response_timeout(ctx, &response_timeout);
        printf("Default response timeout:%ld sec %ld usec \n", response_timeout.tv_sec, response_timeout.tv_usec);
        response_timeout.tv_sec = 60;
        response_timeout.tv_usec = 0;
        modbus_set_response_timeout(ctx, &response_timeout);
        modbus_get_response_timeout(ctx, &response_timeout);
        printf("Set response timeout:%ld sec %ld usec \n", response_timeout.tv_sec, response_timeout.tv_usec);

        /* This code is for version 3.1.2
        modbus_get_response_timeout(ctx, &tv_sec, &tv_usec);
        printf("Default response timeout:%d sec %d usec \n", tv_sec, tv_usec);
        tv_sec = 60;
        tv_usec = 0;
        modbus_set_response_timeout(ctx, tv_sec, tv_usec);
        modbus_get_response_timeout(ctx, &tv_sec, &tv_usec);
        printf("Set response timeout:%d sec %d usec \n", tv_sec, tv_usec);
        */

        rc = modbus_connect(ctx);
        printf("modbus_connect: %d \n", rc);
        if (rc == -1) {
            printf("Connection failed: %s\n", modbus_strerror(errno));
            res = 1;
        }

        rc = modbus_read_registers(ctx, 3908, 2, tab_reg);
        printf("modbus_read_registers: %d \n", rc);
        if (rc == -1) {
            printf("Read registers failed: %s\n", modbus_strerror(errno));
            res = 1;
        }

        for (i = 0; i < 2; i++) {
            printf("reg[%d]=%d (0x%X)\n", i, tab_reg[i], tab_reg[i]);
        }
        avgVLL = modbus_get_float(tab_reg);
        printf("Average Line to Line Voltage = %f\n", avgVLL);

        modbus_close(ctx);
        modbus_free(ctx);
    }
    return res;
}
Output of this sample is as follows:
created libmodbus context
modbus_set_slave return: 0
modbus_rtu_set_serial_mode: -1
modbus_rtu_set_serial_mode: Bad file descriptor
Default response timeout:0 sec 500000 usec
Set response timeout:60 sec 0 usec
Opening /dev/ttyUSB0 at 19200 bauds (E, 8, 1)
modbus_connect: 0
[01][03][0F][44][00][02][87][0A]
Waiting for a confirmation...
ERROR Connection timed out: select
<01><03><0F><44><00><02><87><0A><01><03><04><C4><5F><43><D4><C6><7E>modbus_read_registers: -1
Read registers failed: Connection timed out
reg[0]=0 (0x0)
reg[1]=0 (0x0)
Average Line to Line Voltage = 0.000000
Issue 1) is probably a hardware issue: "local echo" is enabled in your RS-485 adapter. Local echo is sometimes used to confirm the sending of data bytes on the bus. You need to disable it, or find another RS-485 adapter.
I have written about this in the documentation of my MinimalModbus Python library: Local Echo
It lists a few common ways to disable local echo in RS-485 adapters.
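To verify local echo independently of libmodbus, a small raw-termios test can help: transmit one byte on an otherwise idle bus and see whether it immediately comes back. This is a sketch; the device path and baud rate are assumed to match the sample above, and it assumes no other device is transmitting:

#include <fcntl.h>
#include <stdio.h>
#include <sys/select.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);
    if (fd < 0) { perror("open"); return 1; }

    struct termios tio;
    tcgetattr(fd, &tio);
    cfmakeraw(&tio); /* rule out driver-level echo/translation */
    cfsetispeed(&tio, B19200);
    cfsetospeed(&tio, B19200);
    tcsetattr(fd, TCSANOW, &tio);

    unsigned char out = 0x55, in;
    write(fd, &out, 1);
    tcdrain(fd); /* wait until the byte is actually on the wire */

    fd_set rfds;
    struct timeval tv = { 0, 200000 }; /* 200 ms */
    FD_ZERO(&rfds);
    FD_SET(fd, &rfds);
    if (select(fd + 1, &rfds, NULL, NULL, &tv) > 0 && read(fd, &in, 1) == 1)
        printf("local echo detected (0x%02X came back)\n", in);
    else
        printf("no local echo within 200 ms\n");
    close(fd);
    return 0;
}

If the byte comes back, the adapter (not libmodbus) is echoing the request, which matches the trace above where the request frame precedes the response bytes.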
Would anybody care to share some insights on how to use an LSP for packet modification?
I am using the non-IFS subtype, and I can see how (pseudo?) packets first enter WSPRecv. But how do I modify them? My inquiry is about one single HTTP response that causes WSPRecv to be called 3 times :((. I need to modify several parts of this response, but since it arrives in 3 slices, it is pretty hard to modify it consistently. And on other machines, or under different conditions (such as high traffic), there might be only one WSPRecv call, or maybe 10. What is the best way to work around this (please no NDIS :D), and how do I properly change the buffer (lpBuffers->buf) by growing it?
int WSPAPI
WSPRecv(
    SOCKET s,
    LPWSABUF lpBuffers,
    DWORD dwBufferCount,
    LPDWORD lpNumberOfBytesRecvd,
    LPDWORD lpFlags,
    LPWSAOVERLAPPED lpOverlapped,
    LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine,
    LPWSATHREADID lpThreadId,
    LPINT lpErrno
    )
{
    LPWSAOVERLAPPEDPLUS ProviderOverlapped = NULL;
    SOCK_INFO *SocketContext = NULL;
    int ret = SOCKET_ERROR;

    *lpErrno = NO_ERROR;

    //
    // Find our provider socket corresponding to this one
    //
    SocketContext = FindAndRefSocketContext(s, lpErrno);
    if (NULL == SocketContext)
    {
        dbgprint("WSPRecv: FindAndRefSocketContext failed!");
        goto cleanup;
    }

    //
    // Check for overlapped I/O
    //
    if (NULL != lpOverlapped)
    {
        /* bla bla .. not interesting in my case */
    }
    else
    {
        ASSERT(SocketContext->Provider->NextProcTable.lpWSPRecv);
        SetBlockingProvider(SocketContext->Provider);
        ret = SocketContext->Provider->NextProcTable.lpWSPRecv(
                  SocketContext->ProviderSocket,
                  lpBuffers,
                  dwBufferCount,
                  lpNumberOfBytesRecvd,
                  lpFlags,
                  lpOverlapped,
                  lpCompletionRoutine,
                  lpThreadId,
                  lpErrno);
        SetBlockingProvider(NULL);

        // is this the place to modify packet length and contents?
        if (strstr(lpBuffers->buf, "var mapObj = null;"))
        {
            int nLen = strlen(lpBuffers->buf) + 200;
            /*CHAR *szNewBuf = new CHAR[];
            CHAR *pIndex;
            pIndex = strstr(lpBuffers->buf, "var mapObj = null;");
            nLen = strlen(strncpy(szNewBuf, lpBuffers->buf, (pIndex - lpBuffers->buf) * sizeof(CHAR)));
            nLen = strlen(strncpy(szNewBuf + nLen * sizeof(CHAR), "var com = null;\r\n", 17 * sizeof(CHAR)));
            pIndex += 18 * sizeof(CHAR);
            nLen = strlen(strncpy(szNewBuf + nLen * sizeof(CHAR), pIndex, 1330 * sizeof(CHAR)));
            nLen = strlen(strncpy(szNewBuf + nLen * sizeof(CHAR), "if (com == null)\r\n" \
                "com = new ActiveXObject(\"InterCommJS.Gateway\");\r\n" \
                "com.lat = latitude;\r\n" \
                "com.lon = longitude;\r\n}", 111 * sizeof(CHAR)));
            pIndex = strstr(szNewBuf, "Content-Length:");
            pIndex += 16 * sizeof(CHAR);
            strncpy(pIndex, "1465", 4 * sizeof(CHAR));
            lpBuffers->buf = szNewBuf;
            lpBuffers->len += 128;*/
        }

        if (SOCKET_ERROR != ret)
        {
            SocketContext->BytesRecv += *lpNumberOfBytesRecvd;
        }
    }

cleanup:
    if (NULL != SocketContext)
        DerefSocketContext(SocketContext, lpErrno);
    return ret;
}
Thank you
My comment worked out: the HTTP response headers/request turned out to end in \r\n\r\n.
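Building on that observation, one way to cope with a response arriving in an unpredictable number of WSPRecv slices is to accumulate the bytes per socket until the \r\n\r\n terminator has been seen, and only then rewrite and release the data. A sketch of such a hypothetical helper (RecvAccum and accum_append are illustrative names, not part of the LSP sample above):

#include <stdlib.h>
#include <string.h>

typedef struct {
    char  *data;
    size_t len;
} RecvAccum;

/* Append one WSPRecv slice to the per-socket accumulator. Returns 1 once
 * "\r\n\r\n" has been seen (full HTTP header buffered, safe to rewrite),
 * 0 if more slices are needed, -1 on allocation failure. */
static int accum_append(RecvAccum *a, const char *buf, size_t n)
{
    char *p = realloc(a->data, a->len + n + 1);
    if (p == NULL)
        return -1;
    a->data = p;
    memcpy(a->data + a->len, buf, n);
    a->len += n;
    a->data[a->len] = '\0'; /* headers are text, so strstr is safe here */
    return strstr(a->data, "\r\n\r\n") != NULL;
}

For responses with a body you would additionally honour Content-Length (and patch it after editing, as the commented-out code above already attempts), since the body can extend past the header terminator.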