opensc-pkcs11 not getting any token - pkcs#11

I am writing a sample program using opensc-pkcs11.so on Red Hat Linux. This is for a pure software implementation of AES encryption/decryption; I am not using any card.
My program initializes Cryptoki successfully but then gets a CKR_TOKEN_NOT_PRESENT error.
A code snippet is given below.
CK_FUNCTION_LIST_PTR pFunctionList;
CK_C_Initialize pC_Initialize;
CK_RV rv;

rv = C_GetFunctionList(&pFunctionList);
if (rv == CKR_OK) {
    pC_Initialize = pFunctionList->C_Initialize;
    rv = (*pC_Initialize)(NULL_PTR);
}

CK_ULONG ulSlotCount;
CK_SLOT_ID_PTR pSlotList;
CK_SLOT_ID l_lSlotId;
CK_C_GetSlotList pC_GetSlotList;

pC_GetSlotList = pFunctionList->C_GetSlotList;
/* Get the number of slots */
rv = (*pC_GetSlotList)(CK_FALSE, NULL_PTR, &ulSlotCount);
if (rv == CKR_OK)
{
    cout << "ulSlotCount=" << ulSlotCount << endl;
    pSlotList = (CK_SLOT_ID_PTR) malloc(ulSlotCount * sizeof(CK_SLOT_ID));

    /* Get the list of all slots */
    rv = (*pC_GetSlotList)(CK_FALSE, pSlotList, &ulSlotCount);
    if (rv == CKR_OK)
    {
        /* Now use that list of all slots */
        l_lSlotId = pSlotList[0];
        cerr << "lSlotId=" << l_lSlotId << endl;
    }

    CK_SLOT_INFO slotInfo;
    CK_TOKEN_INFO tokenInfo;
    CK_C_GetSlotInfo pC_GetSlotInfo;
    pC_GetSlotInfo = pFunctionList->C_GetSlotInfo;

    /* Get slot information for the first slot */
    rv = (*pC_GetSlotInfo)(pSlotList[0], &slotInfo);
    fprintf(stderr, "pC_GetSlotInfo: rv = 0x%.8lX\n", rv);
    if (rv == CKR_OK)
    {
        /* Get token information for the first slot */
        cerr << "pC_GetSlotInfo OK" << endl;
        CK_C_GetTokenInfo pC_GetTokenInfo;
        pC_GetTokenInfo = pFunctionList->C_GetTokenInfo;
        rv = (*pC_GetTokenInfo)(pSlotList[0], &tokenInfo);
    }
    fprintf(stderr, "pC_GetTokenInfo: rv = 0x%.8lX\n", rv);
    if (rv == CKR_TOKEN_NOT_PRESENT)
    {
        cerr << "CKR_TOKEN_NOT_PRESENT" << endl;
    }
    free(pSlotList);
}
Can anybody give an idea of what is happening? I believe opensc-pkcs11 can also be used for a pure software implementation.
Thanks in advance.

The PKCS#11 library shipped with OpenSC acts "only as a driver" for a set of generally available cryptographic smart cards, so unless you have a physical card reader (with a card inserted) connected to your computer, it won't find any slots with a token present. If you are looking for a pure software PKCS#11 implementation, then I believe you should pick one from my answer to your previous question. If none of them suits your needs, then maybe you could use a general-purpose cryptographic library such as OpenSSL, GnuTLS or Botan.
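If OpenSSL is acceptable, software-only AES can be done directly through its EVP API with no PKCS#11 layer at all. A minimal sketch (the function name and buffer handling are mine, not from OpenSC; detailed error reporting is omitted). The caller supplies a 32-byte key, a 16-byte IV, and a ciphertext buffer at least plaintext_len + 16 bytes long:
#include <openssl/evp.h>

/* Encrypt plaintext with AES-256-CBC; returns ciphertext length or -1 on error. */
int aes_encrypt(const unsigned char *plaintext, int plaintext_len,
                const unsigned char *key, const unsigned char *iv,
                unsigned char *ciphertext)
{
    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    int len = 0, ciphertext_len = 0;

    if (ctx == NULL)
        return -1;
    if (EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, key, iv) != 1 ||
        EVP_EncryptUpdate(ctx, ciphertext, &len, plaintext, plaintext_len) != 1) {
        EVP_CIPHER_CTX_free(ctx);
        return -1;
    }
    ciphertext_len = len;
    if (EVP_EncryptFinal_ex(ctx, ciphertext + len, &len) != 1) {
        EVP_CIPHER_CTX_free(ctx);
        return -1;
    }
    ciphertext_len += len;
    EVP_CIPHER_CTX_free(ctx);
    return ciphertext_len;
}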

Related

stm32L476 - erasing flash

The L4 series erases flash by pages (or by banks, if you do a full erase).
But I'm having some problems writing after doing page erases, and I'm not sure why.
Just to outline the objective: I am storing 6 values starting at 0x08080000 (page 256),
then I am storing more values from 0x08080800 (page 257) to 0x08085800 (page 267).
There is a single function that I use to erase/write the values at page 256:
void write_bias_flash(int16_t biases[]) {
    uint16_t *flash_biases = (uint16_t*) (ADDR_FLASH_PAGE_256);
    static FLASH_EraseInitTypeDef EraseInitStruct;
    Address = ADDR_FLASH_PAGE_256;

    /* Fill EraseInit structure */
    EraseInitStruct.TypeErase = FLASH_TYPEERASE_PAGES;
    EraseInitStruct.Page = 0;
    EraseInitStruct.Banks = FLASH_BANK_2;
    EraseInitStruct.NbPages = 1;

    HAL_FLASH_Unlock();
    if (HAL_FLASHEx_Erase(&EraseInitStruct, &PAGEError) != HAL_OK) {
        serprintf("Error erasing biases at address: 0x%x", Address);
    }
    for (int8_t bias = 0; bias < 6; bias++) {
        if (HAL_FLASH_Program(FLASH_TYPEPROGRAM_DOUBLEWORD,
                Address + bias * sizeof(uint64_t), (uint64_t) biases[bias])
                != HAL_OK)
            serprintf("Error writing biases to flash.");
    }
    HAL_FLASH_Lock();
    serprintf("Biases stored in flash.");
}
This works great. No issues.
I have two functions I use to erase/write the data starting at 0x08080800 (page 257):
void Erase_TM_Flash() {
    uint8_t *flash = (uint8_t*) (FLASH_USER_START_ADDR);
    uint8_t *b = (uint8_t*) (ADDR_FLASH_PAGE_256);
    static FLASH_EraseInitTypeDef EraseInitStruct;
    Address = FLASH_USER_START_ADDR;

    /* Fill EraseInit structure */
    EraseInitStruct.TypeErase = FLASH_TYPEERASE_PAGES;
    EraseInitStruct.Page = 1;
    EraseInitStruct.NbPages = 255;
    EraseInitStruct.Banks = FLASH_BANK_2;

    HAL_FLASH_Unlock();
    if (HAL_FLASHEx_Erase(&EraseInitStruct, &PAGEError) != HAL_OK) {
        serprintf("Error erasing biases at address: 0x%x", Address);
    }
    HAL_FLASH_Lock();

    /* Verify the erased area reads back as 0xFF */
    for (uint16_t i = 0; i < (FLASH_ROW_SIZE * sizeof(uint64_t)) * 255; i++) {
        if ((uint16_t) *(flash + i) != 255) {
            serprintf("0x%x is not erased (%i)", flash + i, (uint16_t) *(flash + i));
        }
    }
}
void Save_to_Flash(uint32_t *data) {
    uint32_t src_addr = (uint32_t) data;
    Erase_TM_Flash();
    serprintf("Saving to flash...");

    HAL_StatusTypeDef HAL_STATUS;
    HAL_FLASH_Unlock();
    Address = FLASH_USER_START_ADDR;
    while (Address < (FLASH_USER_END_ADDR - (FLASH_ROW_SIZE * sizeof(uint64_t)))) {
        HAL_STATUS = HAL_FLASH_Program(FLASH_TYPEPROGRAM_FAST, Address, (uint64_t) src_addr);
        if (HAL_STATUS == HAL_OK) {
            Address = Address + (FLASH_ROW_SIZE * sizeof(uint64_t));
            src_addr = src_addr + (FLASH_ROW_SIZE * sizeof(uint64_t));
        } else {
            serprintf("Error writing flash at address 0x%x. (%i)", Address, HAL_STATUS);
            Address = Address + (FLASH_ROW_SIZE * sizeof(uint64_t));
            src_addr = src_addr + (FLASH_ROW_SIZE * sizeof(uint64_t));
        }
    }
    HAL_FLASH_Lock();
    serprintf("Done");
}
The erase works fine. I verify the values in the debugger (and in the code I check for non-erased pages). But when the saving occurs:
Error writing flash at address 0x8080800. (1)
Error writing flash at address 0x8080900. (1)
Error writing flash at address 0x8080a00. (1)
Error writing flash at address 0x8080b00. (1)
And so on through all the remaining pages.
However, if I erase the entire flash:
void Erase_Flash() {
    serprintf("Erasing flash...");
    HAL_FLASH_Unlock();

    /* Clear OPTVERR bit set on virgin samples */
    __HAL_FLASH_CLEAR_FLAG(FLASH_FLAG_OPTVERR);

    /* Fill EraseInit structure */
    EraseInitStruct.TypeErase = FLASH_TYPEERASE_MASSERASE;
    EraseInitStruct.Banks = FLASH_BANK_2;
    if (HAL_FLASHEx_Erase(&EraseInitStruct, &PAGEError) != HAL_OK) {
        serprintf("Error erasing flash.");
    }
    HAL_FLASH_Lock();
    serprintf("Done.");
}
Then the writing works like a charm.
HAL_STATUS = 1, which according to the code I found is HAL_ERROR = 0x01U, which isn't entirely helpful.
I am not sure what the difference is, but I am hoping another set of eyes on my erase code might reveal the issue.
Thanks!
This issue seems to be related to flash fast programming, which isn't available on all STM32 models.
According to the Reference Manual (RM0351), the flash must be mass erased before using fast programming. Otherwise a Programming Sequence Error occurs, and the PGSERR bit in the FLASH_SR register will be set. See 3.3.7 Flash main memory programming sequences in sections Fast Programming / Programming errors and 3.7.5 Flash status register (FLASH_SR) under Bit 7 PGSERR.
RM0351 Rev 9, 3.3.7 Flash main memory programming sequences, on page 106:
Fast programming
(...)
1. Perform a mass erase of the bank to program. If not, PGSERR is set.
2. (...)
RM0351 Rev 9, 3.3.7 Flash main memory programming sequences, on page 107:
Programming errors
(...)
PGSERR: Programming Sequence Error
PGSERR is set if one of the following conditions occurs:
– (...)
– In the fast programming sequence: the Mass erase is not performed before setting
FSTPG bit.
– (...)
The observed behavior is therefore as expected. So you could replace your Erase_TM_Flash() function and use Erase_Flash() to mass erase the entire flash bank first. Or, avoid flash fast programming altogether and use FLASH_Program_DoubleWord() / HAL_FLASH_Program(FLASH_TYPEPROGRAM_DOUBLEWORD, ...) instead.
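For the second option, here is a sketch of what the write loop could look like with double-word programming, which only requires the target pages to be erased. The function name Save_to_Flash_DoubleWord is mine; it reuses the question's FLASH_USER_* macros and serprintf(), and assumes data points to a buffer covering the whole user area:
/* Sketch only: double-word programming instead of fast programming,
 * so a page erase (rather than a bank mass erase) is sufficient. */
void Save_to_Flash_DoubleWord(uint64_t *data)
{
    uint32_t Address = FLASH_USER_START_ADDR;

    HAL_FLASH_Unlock();
    while (Address < FLASH_USER_END_ADDR) {
        if (HAL_FLASH_Program(FLASH_TYPEPROGRAM_DOUBLEWORD, Address, *data) != HAL_OK) {
            serprintf("Error writing flash at address 0x%x", Address);
            break;
        }
        Address += sizeof(uint64_t);   /* advance one double word at a time */
        data++;
    }
    HAL_FLASH_Lock();
}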
Related source files: stm32l4xx_hal_flash.h, stm32l4xx_hal_flash.c
Related post: STM32 - writing and reading flash

Interpreting keypresses sent to raspberry-pi through uv4l-webrtc datachannel

I apologize if this doesn't make sense since I'm still a newbie with using a raspberry pi and this is my first time posting on StackOverflow.
I am making a web app that lets me stream video to and from a raspberry pi while also letting me send keycodes. The sent keycodes would ultimately let me control servos on a drone. After scouring the internet, I figured that the simplest way to stream 2-way video is by using uv4l so I have it installed along with uv4l-webrtc on my raspberry pi. I hooked up some GPIO pins to a flight controller and I am using pigpio to send PWM signals to it, which I then monitor using CleanFlight.
Right now, I can manipulate with keypresses the roll, pitch, etc. of the flight controller using a python script if I access the pi remotely using VNC, but I would like to ultimately be able to do this through my custom web page that is being served by the uv4l-server. I am trying to use the WebRTC Data Channels, but I'm having some trouble understanding what I would need to do to recognize the messages sent through the data channels. I know that the data channels are opened when a video call is initiated and I've tried the test in this link to see if I can indeed send keycodes to the pi (and I can).
My problem right now is that I have no idea where those sent messages go or how I can get them so I can incorporate them into my python script. Would I need to make a server that would listen for the keycodes being sent to the pi?
tl;dr I have a python script on a raspberry pi to control servos on a flight controller using keypresses and a separate webpage that streams video using WebRTC, but I have no idea how to combine them together using WebRTC data channels.
Thanks to @adminkiam for the solution. Here's a version of the Python script that now listens on the socket. It's essentially a variation of this code by the person who made pigpio:
import os
import socket
import time

import pigpio

socket_path = '/tmp/uv4l.socket'

try:
    os.unlink(socket_path)
except OSError:
    if os.path.exists(socket_path):
        raise

s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)

ROLL_PIN = 13
PITCH_PIN = 14
YAW_PIN = 15

MIN_PW = 1000
MID_PW = 1500
MAX_PW = 2000

NONE = 0
LEFT_ARROW = 1
RIGHT_ARROW = 2
UP_ARROW = 3
DOWN_ARROW = 4
LESS_BTN = 5
GREATER_BTN = 6

print 'socket_path: %s' % socket_path
s.bind(socket_path)
s.listen(1)

def getch(keyCode):
    key = NONE
    if keyCode == 188:
        key = LESS_BTN
    elif keyCode == 190:
        key = GREATER_BTN
    elif keyCode == 37:
        key = LEFT_ARROW
    elif keyCode == 39:
        key = RIGHT_ARROW
    elif keyCode == 38:
        key = UP_ARROW
    elif keyCode == 40:
        key = DOWN_ARROW
    return key

def cleanup():
    pi.set_servo_pulsewidth(ROLL_PIN, 0)
    pi.set_servo_pulsewidth(PITCH_PIN, 0)
    pi.set_servo_pulsewidth(YAW_PIN, 0)
    pi.stop()

while True:
    print 'awaiting connection...'
    connection, client_address = s.accept()
    print 'client_address %s' % client_address
    try:
        print 'established connection with', client_address
        pi = pigpio.pi()
        rollPulsewidth = MID_PW
        pitchPulsewidth = MID_PW
        yawPulsewidth = MID_PW
        pi.set_servo_pulsewidth(ROLL_PIN, rollPulsewidth)
        pi.set_servo_pulsewidth(PITCH_PIN, pitchPulsewidth)
        pi.set_servo_pulsewidth(YAW_PIN, yawPulsewidth)
        while True:
            data = connection.recv(16)
            print 'received message "%s"' % data
            time.sleep(0.01)
            key = getch(int(data))
            rollPW = rollPulsewidth
            pitchPW = pitchPulsewidth
            yawPW = yawPulsewidth
            if key == UP_ARROW:
                pitchPW = pitchPW + 10
                if pitchPW > MAX_PW:
                    pitchPW = MAX_PW
            elif key == DOWN_ARROW:
                pitchPW = pitchPW - 10
                if pitchPW < MIN_PW:
                    pitchPW = MIN_PW
            elif key == LEFT_ARROW:
                rollPW = rollPW - 10
                if rollPW < MIN_PW:
                    rollPW = MIN_PW
            elif key == RIGHT_ARROW:
                rollPW = rollPW + 10
                if rollPW > MAX_PW:
                    rollPW = MAX_PW
            elif key == GREATER_BTN:
                yawPW = yawPW + 10
                if yawPW > MAX_PW:
                    yawPW = MAX_PW
            elif key == LESS_BTN:
                yawPW = yawPW - 10
                if yawPW < MIN_PW:
                    yawPW = MIN_PW
            if rollPW != rollPulsewidth:
                rollPulsewidth = rollPW
                pi.set_servo_pulsewidth(ROLL_PIN, rollPulsewidth)
            if pitchPW != pitchPulsewidth:
                pitchPulsewidth = pitchPW
                pi.set_servo_pulsewidth(PITCH_PIN, pitchPulsewidth)
            if yawPW != yawPulsewidth:
                yawPulsewidth = yawPW
                pi.set_servo_pulsewidth(YAW_PIN, yawPulsewidth)
            if data:
                print 'echo data to client'
                connection.sendall(data)
            else:
                print 'no more data from', client_address
                break
    finally:
        # Clean up the connection
        cleanup()
        connection.close()
When a WebRTC data channel is created between UV4L and the other WebRTC peer (i.e. a browser, Janus Gateway, etc.), UV4L creates a full-duplex Unix domain socket (/tmp/uv4l.socket by default) from/to which you can receive/send messages on the Raspberry Pi. Your Python script should just open the socket, listen, and read the incoming messages from e.g. the web application, and/or write messages to the same socket for the web app to receive them. An example doing this in C++ can be found under the link to the tutorial you pointed out in your question:
/*
Copyright (c) 2016 info#linux-projects.org
All rights reserved.
Redistribution and use in source and binary forms are permitted
provided that the above copyright notice and this paragraph are
duplicated in all such forms and that any documentation,
advertising materials, and other materials related to such
distribution and use acknowledge that the software was developed
by the linux-projects.org. The name of the
linux-projects.org may not be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
* This is a simple echo server.
* It creates a unix domain socket of type SOCK_SEQPACKET specified by
* command line, listens to it waiting for incoming messages from clients
* (e.g. UV4L) and replies the received messages back to the senders.
*
* Example:
* $ ./datachannel_server /tmp/uv4l.socket
*
* To compile this program you need boost v1.60 or greater, for example:
* g++ -Wall -I/path/to/boost/include/ -std=c++11 datachannel_server.cpp -L/path/to/boost/lib -l:libboost_coroutine.a -l:libboost_context.a -l:libboost_system.a -l:libboost_thread.a -pthread -o datachannel_server
*/
#include <boost/asio/io_service.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio.hpp>
#include <memory>
#include <cstdio>
#include <array>
#include <functional>
#include <iostream>
#if !defined(BOOST_ASIO_HAS_LOCAL_SOCKETS)
#error Local sockets not available on this platform.
#endif
constexpr std::size_t MAX_PACKET_SIZE = 1024 * 16;
namespace seqpacket {
struct seqpacket_protocol {
    int type() const {
        return SOCK_SEQPACKET;
    }
    int protocol() const {
        return 0;
    }
    int family() const {
        return AF_UNIX;
    }
    using endpoint = boost::asio::local::basic_endpoint<seqpacket_protocol>;
    using socket = boost::asio::generic::seq_packet_protocol::socket;
    using acceptor = boost::asio::basic_socket_acceptor<seqpacket_protocol>;
#if !defined(BOOST_ASIO_NO_IOSTREAM)
    /// The UNIX domain iostream type.
    using iostream = boost::asio::basic_socket_iostream<seqpacket_protocol>;
#endif
};
}

using seqpacket::seqpacket_protocol;

struct session : public std::enable_shared_from_this<session> {
    explicit session(seqpacket_protocol::socket socket) : socket_(std::move(socket)) {}

    ~session() {
        //std::cerr << "session closed\n";
    }

    void echo(boost::asio::yield_context yield) {
        auto self = shared_from_this();
        try {
            for (;;) {
                seqpacket_protocol::socket::message_flags in_flags = MSG_WAITALL, out_flags = MSG_WAITALL;
                // Wait for the message from the client
                auto bytes_transferred = socket_.async_receive(boost::asio::buffer(data_), in_flags, yield);
                // Write the same message back to the client
                socket_.async_send(boost::asio::buffer(data_, bytes_transferred), out_flags, yield);
            }
        } catch (const std::exception& e) {
            std::cerr << e.what() << '\n';
            socket_.close();
        }
    }

    void go() {
        boost::asio::spawn(socket_.get_io_service(), std::bind(&session::echo, this, std::placeholders::_1));
    }

private:
    seqpacket_protocol::socket socket_;
    std::array<char, MAX_PACKET_SIZE> data_;
};

int main(int argc, char* argv[]) {
    try {
        if (argc != 2) {
            std::cerr << "Usage: datachannel_server <file> (e.g. /tmp/uv4l.socket)\n";
            std::cerr << "*** WARNING: existing file is removed ***\n";
            return EXIT_FAILURE;
        }
        boost::asio::io_service io_service;
        std::remove(argv[1]);
        boost::asio::spawn(io_service, [&](boost::asio::yield_context yield) {
            seqpacket_protocol::acceptor acceptor_(io_service, seqpacket_protocol::endpoint(argv[1]));
            for (;;) {
                boost::system::error_code ec;
                seqpacket_protocol::socket socket_(io_service);
                acceptor_.async_accept(socket_, yield[ec]);
                if (!ec)
                    std::make_shared<session>(std::move(socket_))->go();
            }
        });
        io_service.run();
    } catch (std::exception& e) {
        std::cerr << "Exception: " << e.what() << "\n";
        return EXIT_FAILURE;
    }
}

Java_java_net_PlainSocketImpl_socketSetOption

In OpenJDK 8, the JNI function Java_java_net_PlainSocketImpl_socketSetOption contains:
/*
 * SO_TIMEOUT is a no-op on Solaris/Linux
 */
if (cmd == java_net_SocketOptions_SO_TIMEOUT) {
    return;
}
File: openjdk7/jdk/src/solaris/native/java/net/PlainSocketImpl.c
Does this mean that on Linux, setOption for SO_TIMEOUT will be ignored?
I can't find the JNI code for Linux, but the Solaris code also seems to be used for Linux.
No, it just means it isn't implemented as a socket option. Some platforms don't support it; on those platforms select() or friends are used instead.
The source inside the solaris folder is also used for Linux.
SO_TIMEOUT is ignored in Java_java_net_PlainSocketImpl_socketSetOption0, but the timeout is kept as a field when AbstractPlainSocketImpl.setOption is called:
case SO_TIMEOUT:
    if (val == null || (!(val instanceof Integer)))
        throw new SocketException("Bad parameter for SO_TIMEOUT");
    int tmp = ((Integer) val).intValue();
    if (tmp < 0)
        throw new IllegalArgumentException("timeout < 0");
    // Saved for later use
    timeout = tmp;
    break;
And the timeout is used when doing a read in SocketInputStream:
public int read(byte b[], int off, int length) throws IOException {
    return read(b, off, length, impl.getTimeout());
}
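To illustrate the "select() or friends" point: at the native level, a read timeout is typically implemented by waiting on the socket with poll() before reading, roughly like the following sketch (simplified; the names are mine and this is not the actual OpenJDK source):
#include <poll.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <errno.h>

/* Wait up to timeout_ms for readable data, then read.
 * Returns bytes read, or -1 with errno = EAGAIN on timeout / other errno on error. */
ssize_t read_with_timeout(int fd, void *buf, size_t len, int timeout_ms)
{
    struct pollfd pfd;
    pfd.fd = fd;
    pfd.events = POLLIN;

    int ready = poll(&pfd, 1, timeout_ms);    /* blocks for at most timeout_ms */
    if (ready < 0)
        return -1;                            /* poll() itself failed */
    if (ready == 0) {
        errno = EAGAIN;                       /* timed out; Java surfaces this as SocketTimeoutException */
        return -1;
    }
    return recv(fd, buf, len, 0);             /* data is available, read it */
}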

Erasing page on stm32 fails with FLASH_ERROR_WRP

I am trying to erase one page in flash on an STM32F103RB like so:
FLASH_Unlock();
FLASH_ClearFlag(FLASH_FLAG_BSY | FLASH_FLAG_EOP | FLASH_FLAG_PGERR | FLASH_FLAG_WRPRTERR | FLASH_FLAG_OPTERR);
FLASHStatus = FLASH_ErasePage(Page);
However, FLASH_ErasePage fails producing FLASH_ERROR_WRP
Manually enabling/disabling write protection in the stm32-linker tool doesn't fix the problem.
Basically, FLASH_ErasePage fails with a WRP error without attempting anything if there is already a WRP error flagged in the status register.
As for your FLASH_ClearFlag call, at least FLASH_FLAG_BSY will cause assert_param(IS_FLASH_CLEAR_FLAG(FLASH_FLAG)); to fail (though I'm not really sure what happens in that case).
#define IS_FLASH_CLEAR_FLAG(FLAG) ((((FLAG) & (uint32_t)0xFFFFC0FD) == 0x00000000) && ((FLAG) != 0x00000000))
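A sketch of the adjusted sequence, reusing the question's variables (FLASHStatus, Page) and dropping FLASH_FLAG_BSY from the clear mask so stale error flags are cleared before the erase (whether the remaining flags pass the assert depends on your SPL version):
FLASH_Unlock();

/* Clear pending error/EOP flags; BSY is not a clearable flag. */
FLASH_ClearFlag(FLASH_FLAG_EOP | FLASH_FLAG_PGERR | FLASH_FLAG_WRPRTERR | FLASH_FLAG_OPTERR);

FLASHStatus = FLASH_ErasePage(Page);
if (FLASHStatus != FLASH_COMPLETE) {
    /* handle FLASH_ERROR_WRP / FLASH_ERROR_PG / FLASH_TIMEOUT here */
}

FLASH_Lock();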
What is your page address? Which address are you trying to access?
For instance, this example is tested on STM32F100C8 in terms of not only erasing but also writing data correctly.
http://www.ozturkibrahim.com/TR/eeprom-emulation-on-stm32/
If using the HAL driver, your code might look like this (cut and paste from a real project):
static HAL_StatusTypeDef Erase_Main_Program()
{
    FLASH_EraseInitTypeDef ins;
    uint32_t sectorerror;

    ins.TypeErase = FLASH_TYPEERASE_SECTORS;
    ins.Banks = FLASH_BANK_1;   /* Do not care, used for mass-erase */
#warning We currently erase from sector 2 (only keep 64KB of flash for boot)
    ins.Sector = FLASH_SECTOR_4;
    ins.NbSectors = 4;
    ins.VoltageRange = FLASH_VOLTAGE_RANGE_3; /* voltage-range defines how big blocks can be erased at the same time */
    return HAL_FLASHEx_Erase(&ins, &sectorerror);
}
The internal function in the HAL driver that actually does the work
void FLASH_Erase_Sector(uint32_t Sector, uint8_t VoltageRange)
{
    uint32_t tmp_psize = 0U;

    /* Check the parameters */
    assert_param(IS_FLASH_SECTOR(Sector));
    assert_param(IS_VOLTAGERANGE(VoltageRange));

    if(VoltageRange == FLASH_VOLTAGE_RANGE_1)
    {
        tmp_psize = FLASH_PSIZE_BYTE;
    }
    else if(VoltageRange == FLASH_VOLTAGE_RANGE_2)
    {
        tmp_psize = FLASH_PSIZE_HALF_WORD;
    }
    else if(VoltageRange == FLASH_VOLTAGE_RANGE_3)
    {
        tmp_psize = FLASH_PSIZE_WORD;
    }
    else
    {
        tmp_psize = FLASH_PSIZE_DOUBLE_WORD;
    }

    /* If the previous operation is completed, proceed to erase the sector */
    CLEAR_BIT(FLASH->CR, FLASH_CR_PSIZE);
    FLASH->CR |= tmp_psize;
    CLEAR_BIT(FLASH->CR, FLASH_CR_SNB);
    FLASH->CR |= FLASH_CR_SER | (Sector << POSITION_VAL(FLASH_CR_SNB));
    FLASH->CR |= FLASH_CR_STRT;
}
Second thing to check: are interrupts enabled, and is there any hardware access between the unlock call and the erase call?
I hope this helps

XSD Validation on iOS

I want to validate XML files against an XSD on iOS. The documentation alludes to using NSXMLDocument to do this, but it's not available on iOS =(. Are there any lightweight alternatives on iOS?
I ended up using the validation facilities in libxml2, since it's a library already included in iOS, following this sample code:
#include <libxml/parser.h>
#include <libxml/xmlschemas.h>
int is_valid(const xmlDocPtr doc, const char *schema_filename)
{
    xmlDocPtr schema_doc = xmlReadFile(schema_filename, NULL, XML_PARSE_NONET);
    if (schema_doc == NULL) {
        /* the schema cannot be loaded or is not well-formed */
        return -1;
    }
    xmlSchemaParserCtxtPtr parser_ctxt = xmlSchemaNewDocParserCtxt(schema_doc);
    if (parser_ctxt == NULL) {
        /* unable to create a parser context for the schema */
        xmlFreeDoc(schema_doc);
        return -2;
    }
    xmlSchemaPtr schema = xmlSchemaParse(parser_ctxt);
    if (schema == NULL) {
        /* the schema itself is not valid */
        xmlSchemaFreeParserCtxt(parser_ctxt);
        xmlFreeDoc(schema_doc);
        return -3;
    }
    xmlSchemaValidCtxtPtr valid_ctxt = xmlSchemaNewValidCtxt(schema);
    if (valid_ctxt == NULL) {
        /* unable to create a validation context for the schema */
        xmlSchemaFree(schema);
        xmlSchemaFreeParserCtxt(parser_ctxt);
        xmlFreeDoc(schema_doc);
        return -4;
    }
    int is_valid = (xmlSchemaValidateDoc(valid_ctxt, doc) == 0);
    xmlSchemaFreeValidCtxt(valid_ctxt);
    xmlSchemaFree(schema);
    xmlSchemaFreeParserCtxt(parser_ctxt);
    xmlFreeDoc(schema_doc);
    /* force the return value to be non-negative on success */
    return is_valid ? 1 : 0;
}
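For completeness, a minimal usage sketch of the function above (the file names are placeholders; it assumes the snippet above is compiled into the same program):
#include <libxml/parser.h>

int main(void)
{
    xmlDocPtr doc = xmlReadFile("document.xml", NULL, XML_PARSE_NONET);
    if (doc == NULL)
        return 1;                      /* the document is not well-formed */

    int result = is_valid(doc, "schema.xsd");
    /* result: 1 = valid, 0 = invalid, negative = problem with the schema */

    xmlFreeDoc(doc);
    xmlCleanupParser();
    return result == 1 ? 0 : 1;
}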
It appears that it is not exactly easy to do in Objective-C, but there are several ideas listed at this S.O. question: Possible to validate xml against xsd using Objc/iPhone code at runtime
It seems CodeSynthesis supports this: http://wiki.codesynthesis.com/Using_XSDE_in_iPhone_Applications
I am really just pulling links and ideas from the Stack Overflow question at this point, though.
There is no general schema validator available. Try using XSDE as proposed above. It is very fast and very, very reliable.
Nice tutorial is here: http://amateuritsolutions.blogspot.hu/2012/10/validate-xsd-schema-in-your-ios.html