Related
I'm working on a challenge from root-me.org. As I understand the attack description, I need to flip some bytes of the ciphertext so that the decrypted token contains is_member=true. After reading some write-ups, I extend the message with an extra block and do the XOR against the previous block accordingly, but when I submit the token, the decryption output is:
b'[id=546815648;name=iziziz;is_member=false;mail=blablacar;pad=00]r\xe5\xf2\x1dM\xa5\xcae\xff\x16\xa2\xc6\xbe\xd8~I'
Tell me what I might have missed. My code:
package main

import (
    "encoding/base64"
    "fmt"
    "log"
)

// [id=546815648;name=iziziz;is_member=false;mail=blablacar;pad=00]
var TOKEN = "IRZjBh6GxjeYI7YZvxwfBHmxjY+Wd7bPr7s73wWwLHKaR+N8fPDIjT8/AlUIDSzniMgqCV9bJArQbec64kPYXQ=="

func main() {
    // block size 16
    tokenHex, err := base64.StdEncoding.DecodeString(TOKEN) // length 64
    if err != nil {
        log.Fatal(err)
    }
    block := 16
    // print the ciphertext block by block
    for i := 0; i < len(tokenHex); i += block {
        fmt.Println(tokenHex[i : i+block])
    }
    // append one extra block of filler bytes
    tmp := make([]byte, block)
    for i := 0; i < block; i++ {
        tmp[i] = byte('a')
    }
    tokenHex = append(tokenHex, tmp...)
    expected := []byte(`;is_member=true]`)
    // last block of the original ciphertext
    current := []byte{136, 200, 42, 9, 95, 91, 36, 10, 208, 109, 231, 58, 226, 67, 216, 93}
    for i := 0; i < 16; i++ {
        xor := expected[i] ^ current[i]
        tokenHex[64+i] ^= xor
    }
    fmt.Println(base64.StdEncoding.EncodeToString(tokenHex))
}
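For reference, the standard CBC bit-flipping relation is P[i] = Decrypt(C[i]) XOR C[i-1], so XORing a difference into ciphertext block i-1 flips the same bytes of plaintext block i (and scrambles plaintext block i-1, which is why an extra sacrificial block is appended). A minimal sketch of that relation, using hypothetical names of my own rather than a fix for the code above:

// flipBlock is an illustrative helper (not from the question): it rewrites
// ciphertext block i-1 so that plaintext block i decrypts to `want` instead of `have`.
func flipBlock(ciphertext []byte, blockSize, i int, have, want []byte) {
    prev := ciphertext[(i-1)*blockSize : i*blockSize]
    for k := 0; k < blockSize; k++ {
        prev[k] ^= have[k] ^ want[k]
    }
}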
SQL:
CREATE TYPE url AS (
url varchar,
status int4
);
CREATE TABLE public.tiantang_page (
href varchar NOT NULL,
status int4 NOT NULL,
description varchar NOT NULL,
urls url[] NULL
);
Go code to insert the composite type array:
type url struct {
url string
status int
}
var urls [1]url
urls[0] = url{
url: "",
status: 0,
}
update := "UPDATE \"public\".\"tiantang_page\" SET \"urls\"=$1 where \"href\"=$2;"
r, err := db.Exec(update, pq.Array(urls), href)
if err != nil {
log.Fatal(err)
}
Error:
sql: converting argument $1 type: unsupported type parsetest.url, a struct
Library:
https://godoc.org/github.com/lib/pq
Note that custom composite types are not fully supported by lib/pq.
If all you want is to be able to store the urls, then the simplest approach would be to implement the driver.Valuer interface on the url type and then use it with pq.Array as you already do:
func (u url) Value() (driver.Value, error) {
return fmt.Sprintf("(%s,%d)", u.url, u.status), nil
}
// ...
r, err := db.Exec(update, pq.Array(urls), href)
More info on that can be found here: https://github.com/lib/pq/issues/544
Note that I haven't tried this with arrays, only with slices, so you may have to switch from using an array to using a slice, i.e. instead of var urls [1]url you would use var urls = make([]url, 1).
If you also want to be able to retrieve the array of urls back from the db, then you'll have to implement the sql.Scanner interface; however, pq.Array is not very reliable here, so you'll have to implement the scanner on the slice type and do all the parsing yourself.
The general format of a composite type literal is (val1, val2, ...); note that you have to put double quotes around values that contain commas or parentheses. For example, to construct a value of the url type you would use the literal expression (http://example.com,4). More info in the docs.
The format for an array of composite types is {"(val1, val2, ...)" [, ...]}; note that in this case, if you need to put double quotes around the values, you need to escape them. For example: {"(http://example.com,4)","(\"http://example.com/?list=foo,bar,baz\",3)"}
So as you can see, the more complex the data in the composite type, the more complex the parsing will be as well.
Here's a crude example (does not handle quoted values):
type urlslice []url
func (s *urlslice) Scan(src interface{}) error {
var a []byte // the pq array as bytes
switch v := src.(type) {
case []byte:
a = v
case string:
a = []byte(v)
case nil:
*s = nil
return nil
default:
return fmt.Errorf("urlslice.Scan unexpected src type %T", src)
}
a = a[1 : len(a)-1] // drop curly braces
for i := 0; i < len(a); i++ {
if a[i] == '"' && (len(a) > (i+1) && a[i+1] == '(') { // element start?
i += 2 // move past `"(`
j := i // start of url.url
u := url{}
for ; i < len(a) && a[i] != ','; i++ {
}
u.url = string(a[j:i])
i += 1 // move past `,`
j = i // start of url.status
for ; i < len(a) && a[i] != ')'; i++ {
}
i64, err := strconv.ParseInt(string(a[j:i]), 10, 64)
if err != nil {
return err
}
u.status = int(i64)
*s = append(*s, u)
i += 2 // move past `)",`
}
}
return nil
}
For completeness, here's the Valuer interface implemented by the slice type, again without handling the proper quoting of values that may require it:
func (s urlslice) Value() (driver.Value, error) {
if len(s) == 0 {
return []byte("{}"), nil // an empty array literal
}
data := []byte{'{'}
for _, url := range s {
data = append(data, '"', '(')
data = append(data, []byte(url.url)...)
data = append(data, ',')
data = strconv.AppendInt(data, int64(url.status), 10)
data = append(data, ')', '"', ',')
}
data[len(data)-1] = '}' // replace the last ',' with '}' to close the array
return data, nil
}
With the urlslice implementing the two interfaces directly you can stop using pq.Array.
var urls = urlslice{{
url: "http://example.com",
status: 4,
}}
update := `UPDATE "public"."tiantang_page" SET "urls"=$1 where "href"=$2`
r, err := db.Exec(update, urls, href)
if err != nil {
log.Fatal(err)
}
var urls2 urlslice
selurls := `SELECT "urls" FROM "public"."tiantang_page" where "href" = $1`
if err := db.QueryRow(selurls, href).Scan(&urls2); err != nil {
log.Fatal(err)
}
Please keep in mind that both of the above examples should be considered only as hints of the direction to take in solving this problem. Not only are the two examples incomplete in that they don't handle quoted values, but they are also not very elegant implementations.
Reasonably complete composite literal parser:
type parseState int
const (
state_initial parseState = iota // start
state_value_start // no bytes read from value yet
state_value // unquoted value
state_quoted // inside quote
state_value_end // after a close quote
state_end // after close paren
)
func parseComposite(in []byte) ([]string, error) {
state := state_initial
ret := []string{}
val := []byte{}
for _, b := range in {
switch state {
case state_initial:
if b != '(' {
return nil, fmt.Errorf("initial character not ')': %v", in)
} else {
state = state_value_start
}
case state_value_start:
if b == '"' {
state = state_quoted
continue
}
fallthrough
case state_value:
if b == ',' {
ret = append(ret, string(val))
val = nil
state = state_value_start
} else if b == ')' {
ret = append(ret, string(val))
val = nil
state = state_end
} else {
val = append(val, b)
}
case state_quoted:
if b == '"' {
ret = append(ret, string(val))
val = nil
state = state_value_end
} else {
val = append(val, b)
}
case state_value_end:
if b == ',' {
state = state_value_start
} else if b == ')' {
state = state_end
} else {
return nil, fmt.Errorf("invalid delimiter after closing quote: %v", in)
}
case state_end:
return nil, fmt.Errorf("trailing bytes: %v", in)
}
}
if state != state_end {
return nil, fmt.Errorf("unterminated value: %v", in)
}
return ret, nil
}
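A quick usage sketch of the parser (my own example input, not part of the original answer), showing how a quoted value containing commas is handled:

fields, err := parseComposite([]byte(`("http://example.com/?list=foo,bar",4)`))
if err != nil {
    log.Fatal(err)
}
fmt.Println(fields) // prints: [http://example.com/?list=foo,bar 4]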
I am struggling to get the hash of the file whose name is passed in, using the blake2 crate. From the documentation:
extern crate blake2;
use blake2::{Blake2b, Digest};
use std::env;
use std::fs;
use std::io::{self, Read};
const BUFFER_SIZE: usize = 1024;
fn print_result(sum: &[u8]) {
for byte in sum {
print!("{:02x}", byte);
}
}
fn process<D: Digest + Default, R: Read>(reader: &mut R) {
let mut sh = D::default();
let mut buffer = [0u8; BUFFER_SIZE];
loop {
let n = match reader.read(&mut buffer) {
Ok(n) => n,
Err(_) => return,
};
sh.input(&buffer[..n]);
if n == 0 || n < BUFFER_SIZE {
break;
}
}
print_result(&sh.result());
}
fn main() {
let args = env::args();
if args.len() > 1 {
for path in args.skip(1) {
if let Ok(mut file) = fs::File::open(&path) {
process::<Blake2b, _>(&mut file);
}
}
} else {
process::<Blake2b, _>(&mut io::stdin());
}
}
blake-test $ cargo run hoge.txt
Compiling blake-test v0.1.0 (/Users/hoge/blake-test)
Finished dev [unoptimized + debuginfo] target(s) in 0.61s
Running `target/debug/blake-test hoge.txt`
eefea9ae6b7fb678ed54e6d58d46aed9eae6d003f29419948cdb42a44a7016dee3eb566e7e95c68ac7587d5debd516a3b195eed0db84d72819e387d687fd06a6
It can successfully print the &[u8] slice.
However, I want to receive/return the results instead of printing them.
When you're returning a newly-created object, you have to return it as an owned value.
Borrowed references such as &[u8] are temporary and can't exist by themselves; they're merely views of data that is stored in an owned form elsewhere.
You can, for example, call .to_vec() on the slice and return a Vec<u8>.
I've searched and found a few posts relating to postgres csv imports, but nothing that solves my current problem.
I use the postgres COPY command all the time to bring data from heterogeneous data sources into our system. I'm currently struggling with a 100-million-row .csv file, comma-quote delimited. The issue is with rows like this:
009098,0981098094,"something","something else",""this one, well, is a problem"", "another thing"
Fields are enclosed in double quotes and contain embedded commas. The fields are not parsed correctly and I get the error:
"ERROR: extra data after last expected column"
Usually when this arises I deal with the offending rows ad hoc, but this file is so huge that I'm hoping for a more general way to defend against it. Asking for a revised data format is not a possibility.
copy mytable from '/path/to/file.csv' csv header quote '"'
That's malformed CSV. You double a double quote to embed a double quote inside a quoted field; for example:
"where","is ""pancakes""","house?"
has three values:
where
is "pancakes"
house?
The row you're having trouble with has stray doubled double quotes:
009098,0981098094,"something","something else",""this one, well, is a problem"", "another thing"
^^ ^^
I don't think there is anything that COPY can do about this as the correct version is ambiguous: should it be "this one, well, is a problem" or should it be """this one, well, is a problem"""?
I think you'll have to fix it by hand. A quick sed one-liner should be able to do the job if you can uniquely identify the broken row.
For reference purposes, the closest thing I've seen to a CSV standard is RFC 4180 and section two has this to say:
5. Each field may or may not be enclosed in double quotes (however
some programs, such as Microsoft Excel, do not use double quotes
at all). If fields are not enclosed with double quotes, then
double quotes may not appear inside the fields. For example:
"aaa","bbb","ccc" CRLF
zzz,yyy,xxx
[...]
7. If double-quotes are used to enclose fields, then a double-quote
appearing inside a field must be escaped by preceding it with
another double quote. For example:
"aaa","b""bb","ccc"
Here is code based on the CSV code from The Practice of Programming by Kernighan and Pike that has been adapted to deal with your weird malformed CSV data. (It wasn't all that hard to do; I already had the main code working and packaged, so I just had to add the CSV output functions and modify the advquoted() function to handle the weird format in this question.)
csv2.h
/*
#(#)File: $RCSfile: csv2.h,v $
#(#)Version: $Revision: 2.1 $
#(#)Last changed: $Date: 2012/11/01 22:23:07 $
#(#)Purpose: Scanner for Comma Separated Variable (CSV) Data
#(#)Author: J Leffler
#(#)Origin: Kernighan & Pike, 'The Practice of Programming'
*/
/*TABSTOP=4*/
#ifndef CSV2_H
#define CSV2_H
#ifdef __cplusplus
extern "C" {
#endif
#ifdef MAIN_PROGRAM
#ifndef lint
/* Prevent over-aggressive optimizers from eliminating ID string */
const char jlss_id_csv2_h[] = "#(#)$Id: csv2.h,v 2.1 2012/11/01 22:23:07 jleffler Exp $";
#endif /* lint */
#endif /* MAIN_PROGRAM */
#include <stdio.h>
extern char *csvgetline(FILE *ifp); /* Read next input line */
extern char *csvgetfield(size_t n); /* Return field n */
extern size_t csvnfield(void); /* Return number of fields */
extern void csvreset(void); /* Release space used by CSV */
extern int csvputfield(FILE *ofp, const char *field);
extern int csvputline(FILE *ofp, char **fields, int nfields);
extern void csvseteol(const char *eol);
#ifdef __cplusplus
}
#endif
#endif /* CSV2_H */
csv2.c
/*
#(#)File: $RCSfile: csv2.c,v $
#(#)Version: $Revision: 2.1 $
#(#)Last changed: $Date: 2012/11/01 22:23:07 $
#(#)Purpose: Scanner for Comma Separated Variable (CSV) Data
#(#)Modification: Deal with specific malformed CSV
#(#)Author: J Leffler
#(#)Origin: Kernighan & Pike, 'The Practice of Programming'
*/
/*TABSTOP=4*/
#ifndef lint
/* Prevent over-aggressive optimizers from eliminating ID string */
const char jlss_id_csv2_c[] = "#(#)$Id: csv2.c,v 2.1 2012/11/01 22:23:07 jleffler Exp $";
#endif /* lint */
/*
** See RFC 4180 (http://www.ietf.org/rfc/rfc4180.txt).
**
** Specific malformed CSV - see SO 13183644 (http://stackoverflow.com/questions/13183644).
** Data contains malformed CSV fields like: OK,""this is a problem"",OK
** Two (but not three) field quotes at the start extract as "this is a problem" (with the quotes).
*/
#include "csv2.h"
#include <stdlib.h>
#include <string.h>
enum { NOMEM = -2 };
static char *line = 0; /* Input line */
static char *sline = 0; /* Split line */
static size_t maxline = 0; /* Size of line[] and sline[] */
static char **field = 0; /* Field pointers */
static size_t maxfield = 0; /* Size of field[] */
static size_t nfield = 0; /* Number of fields */
static char fieldsep[]= ","; /* Field separator characters */
static char fieldquote = '"'; /* Quote character */
static char eolstr[8] = "\n";
void csvreset(void)
{
free(line);
free(sline);
free(field);
line = 0;
sline = 0;
field = 0;
maxline = maxfield = nfield = 0;
}
static int endofline(FILE *ifp, int c)
{
int eol = (c == '\r' || c == '\n');
if (c == '\r')
{
c = getc(ifp);
if (c != '\n' && c != EOF)
ungetc(c, ifp);
}
return(eol);
}
/* Modified to deal with specific malformed CSV */
static char *advquoted(char *p)
{
size_t i;
size_t j;
if (p[0] == fieldquote && (p[1] != *fieldsep && p[1] != fieldquote))
{
/* Malformed CSV: ""some stuff"" --> "some stuff" */
/* Find "\"\"," or "\"\"\0" to mark end of field */
/* If we don't find it, drop through to 'regular' case */
char *eof = strstr(&p[2], "\"\"");
if (eof != 0 && (eof[2] == *fieldsep || eof[2] == '\0'))
{
p[eof + 1 - p] = '\0';
return(eof + 2);
}
}
for (i = j = 0; p[j] != '\0'; i++, j++)
{
if (p[j] == fieldquote && p[++j] != fieldquote)
{
size_t k = strcspn(p+j, fieldsep);
memmove(p+i, p+j, k); // 1 -> i fixing transcription error
i += k;
j += k;
break;
}
p[i] = p[j];
}
p[i] = '\0';
return(p + j);
}
static int split(void)
{
char *p;
char **newf;
char *sepp;
int sepc;
nfield = 0;
if (line[0] == '\0')
return(0);
strcpy(sline, line);
p = sline;
do
{
if (nfield >= maxfield)
{
maxfield *= 2;
newf = (char **)realloc(field, maxfield * sizeof(field[0]));
if (newf == 0)
return NOMEM;
field = newf;
}
if (*p == fieldquote)
sepp = advquoted(++p);
else
sepp = p + strcspn(p, fieldsep);
sepc = sepp[0];
sepp[0] = '\0';
field[nfield++] = p;
p = sepp + 1;
} while (sepc == ',');
return(nfield);
}
char *csvgetline(FILE *ifp)
{
size_t i;
int c;
if (line == NULL)
{
/* Allocate on first call */
maxline = maxfield = 1;
line = (char *)malloc(maxline); /*=C++=*/
sline = (char *)malloc(maxline); /*=C++-*/
field = (char **)malloc(maxfield*sizeof(field[0])); /*=C++=*/
if (line == NULL || sline == NULL || field == NULL)
{
csvreset();
return(NULL); /* out of memory */
}
}
for (i = 0; (c = getc(ifp)) != EOF && !endofline(ifp, c); i++)
{
if (i >= maxline - 1)
{
char *newl;
char *news;
maxline *= 2;
newl = (char *)realloc(line, maxline); /*=C++=*/
news = (char *)realloc(sline, maxline); /*=C++-*/
if (newl == NULL || news == NULL)
{
csvreset();
return(NULL); /* out of memory */
}
line = newl;
sline = news;
}
line[i] = c;
}
line[i] = '\0';
if (split() == NOMEM)
{
csvreset();
return(NULL);
}
return((c == EOF && i == 0) ? NULL : line);
}
char *csvgetfield(size_t n)
{
if (n >= nfield)
return(0);
return(field[n]);
}
size_t csvnfield(void)
{
return(nfield);
}
int csvputfield(FILE *ofp, const char *ofield)
{
const char escapes[] = "\",\r\n";
if (strpbrk(ofield, escapes) != 0)
{
size_t len = strlen(ofield) + 2;
const char *pos = ofield;
while ((pos = strchr(pos, '"')) != 0)
{
len++;
pos++;
}
char *space = malloc(len+1);
if (space == 0)
return EOF;
char *cpy = space;
pos = ofield;
*cpy++ = '"';
char c;
while ((c = *pos++) != '\0')
{
if (c == '"')
*cpy++ = c;
*cpy++ = c;
}
*cpy++ = '"';
*cpy = '\0';
int rc = fputs(space, ofp);
free(space);
return rc;
}
else
return fputs(ofield, ofp);
}
int csvputline(FILE *ofp, char **fields, int nfields)
{
for (int i = 0; i < nfields; i++)
{
if (i > 0)
putc(',', ofp);
if (csvputfield(ofp, fields[i]) == EOF)
return EOF;
}
return(fputs(eolstr, ofp));
}
void csvseteol(const char *eol)
{
size_t nbytes = strlen(eol);
if (nbytes >= sizeof(eolstr))
nbytes = sizeof(eolstr) - 1;
memmove(eolstr, eol, nbytes);
eolstr[nbytes] = '\0';
}
#ifdef TEST
int main(void)
{
char *in_line;
while ((in_line = csvgetline(stdin)) != 0)
{
size_t n = csvnfield();
char *fields[n]; /* C99 VLA */
printf("line = '%s'\n", in_line);
for (size_t i = 0; i < n; i++)
{
printf("field[%zu] = '%s'\n", i, csvgetfield(i));
printf("field[%zu] = [", i);
csvputfield(stdout, csvgetfield(i));
fputs("]\n", stdout);
fields[i] = csvgetfield(i);
}
printf("fields[0..%zu] = ", n-1);
csvputline(stdout, fields, n);
}
return(0);
}
#endif /* TEST */
Compile the code with -DTEST to create a program with the example main() function. You need a C99 compiler; the code in main() uses a VLA (variable length array). You could avoid that with dynamic memory allocation or with pessimistic (overkill) memory allocation (an array of a few thousand pointers isn't going to kill most systems these days, but few CSV files will have a few thousand fields per line).
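For example, with a C99 compiler and csv2.c/csv2.h in the current directory, the build command would be something like:

cc -std=c99 -DTEST -o csv2 csv2.c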
Example Data
Based closely on the data in the question.
009098,0981098094,"something","something else",""this one, well, is a problem"", "another thing"
123458,1234561007,"anything","nothing else",""this one, well, is a problem"","dohicky
503458,1234598094,"nothing","everything else","""this one, well, it isn't a problem""","abelone"
610078,1236100794,"everything","anything else","this ""isn't a problem"", he said.","Orcas Rule"
Example Output
line = '009098,0981098094,"something","something else",""this one, well, is a problem"", "another thing"'
field[0] = '009098'
field[0] = [009098]
field[1] = '0981098094'
field[1] = [0981098094]
field[2] = 'something'
field[2] = [something]
field[3] = 'something else'
field[3] = [something else]
field[4] = '"this one, well, is a problem"'
field[4] = ["""this one, well, is a problem"""]
field[5] = ' "another thing"'
field[5] = [" ""another thing"""]
fields[0..5] = 009098,0981098094,something,something else,"""this one, well, is a problem"""," ""another thing"""
line = '123458,1234561007,"anything","nothing else",""this one, well, is a problem"","dohicky'
field[0] = '123458'
field[0] = [123458]
field[1] = '1234561007'
field[1] = [1234561007]
field[2] = 'anything'
field[2] = [anything]
field[3] = 'nothing else'
field[3] = [nothing else]
field[4] = '"this one, well, is a problem"'
field[4] = ["""this one, well, is a problem"""]
field[5] = 'dohicky'
field[5] = [dohicky]
fields[0..5] = 123458,1234561007,anything,nothing else,"""this one, well, is a problem""",dohicky
line = '503458,1234598094,"nothing","everything else","""this one, well, it isn't a problem""","abelone"'
field[0] = '503458'
field[0] = [503458]
field[1] = '1234598094'
field[1] = [1234598094]
field[2] = 'nothing'
field[2] = [nothing]
field[3] = 'everything else'
field[3] = [everything else]
field[4] = '"this one, well, it isn't a problem"'
field[4] = ["""this one, well, it isn't a problem"""]
field[5] = 'abelone'
field[5] = [abelone]
fields[0..5] = 503458,1234598094,nothing,everything else,"""this one, well, it isn't a problem""",abelone
line = '610078,1236100794,"everything","anything else","this ""isn't a problem"", he said.","Orcas Rule"'
field[0] = '610078'
field[0] = [610078]
field[1] = '1236100794'
field[1] = [1236100794]
field[2] = 'everything'
field[2] = [everything]
field[3] = 'anything else'
field[3] = [anything else]
field[4] = 'this "isn't a problem", he said.'
field[4] = ["this ""isn't a problem"", he said."]
field[5] = 'Orcas Rule'
field[5] = [Orcas Rule]
fields[0..5] = 610078,1236100794,everything,anything else,"this ""isn't a problem"", he said.",Orcas Rule
The fields are printed twice: once to test the field extraction, and once to test the field printing. To convert your file from malformed CSV to properly formed CSV, you'd simplify the output by removing all of the printing except the csvputline() call.
I'm playing sounds for my game with OpenAL, and I have a problem: sometimes a small glitch is played while looping. Without looping I also get a small pop... sometimes, but not always.
I think it has something to do with the buffer being a little too long, so that there is some undefined data at the end. I just can't figure out how to change this. I'm loading a .caf file with this function:
void* MyGetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei *outSampleRate, ALdouble *duration) {
OSStatus err = noErr;
SInt64 theFileLengthInFrames = 0;
AudioStreamBasicDescription theFileFormat;
UInt32 thePropertySize = sizeof(theFileFormat);
ExtAudioFileRef extRef = NULL;
void* theData = NULL;
AudioStreamBasicDescription theOutputFormat;
// Open a file with ExtAudioFileOpen()
err = ExtAudioFileOpenURL(inFileURL, &extRef);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileOpenURL FAILED, Error = %ld\n", err); goto Exit; }
// Get the audio data format
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileDataFormat, &thePropertySize, &theFileFormat);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileDataFormat) FAILED, Error = %ld\n", err); goto Exit; }
if (theFileFormat.mChannelsPerFrame > 2) { printf("MyGetOpenALAudioData - Unsupported Format, channel count is greater than stereo\n"); goto Exit;}
// Set the client format to 16 bit signed integer (native-endian) data
// Maintain the channel count and sample rate of the original source format
theOutputFormat.mSampleRate = theFileFormat.mSampleRate;
theOutputFormat.mChannelsPerFrame = theFileFormat.mChannelsPerFrame;
theOutputFormat.mFormatID = kAudioFormatLinearPCM;
theOutputFormat.mBytesPerPacket = 2 * theOutputFormat.mChannelsPerFrame;
theOutputFormat.mFramesPerPacket = 1;
theOutputFormat.mBytesPerFrame = 2 * theOutputFormat.mChannelsPerFrame;
theOutputFormat.mBitsPerChannel = 16;
theOutputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
// Set the desired client (output) data format
err = ExtAudioFileSetProperty(extRef, kExtAudioFileProperty_ClientDataFormat, sizeof(theOutputFormat), &theOutputFormat);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileSetProperty(kExtAudioFileProperty_ClientDataFormat) FAILED, Error = %ld\n", err); goto Exit; }
// Get the total frame count
thePropertySize = sizeof(theFileLengthInFrames);
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileLengthFrames) FAILED, Error = %ld\n", err); goto Exit; }
// Read all the data into memory
UInt32 dataSize = theFileLengthInFrames * theOutputFormat.mBytesPerFrame;
theData = malloc(dataSize);
if (theData)
{
AudioBufferList theDataBuffer;
theDataBuffer.mNumberBuffers = 1;
theDataBuffer.mBuffers[0].mDataByteSize = dataSize;
theDataBuffer.mBuffers[0].mNumberChannels = theOutputFormat.mChannelsPerFrame;
theDataBuffer.mBuffers[0].mData = theData;
// Read the data into an AudioBufferList
err = ExtAudioFileRead(extRef, (UInt32*)&theFileLengthInFrames, &theDataBuffer);
if(err == noErr)
{
// success
*outDataSize = (ALsizei)dataSize;
*outDataFormat = (theOutputFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
*outSampleRate = (ALsizei)theOutputFormat.mSampleRate;
}
else
{
// failure
free (theData);
theData = NULL; // make sure to return NULL
printf("MyGetOpenALAudioData: ExtAudioFileRead FAILED, Error = %ld\n", err); goto Exit;
}
}
// Alex(Colombiamug): get the file duration...
// first, get the audioID for the file...
AudioFileID audioID;
UInt32 audioIDSize = sizeof(audioID);
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_AudioFile, &audioIDSize, &audioID);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_AudioFile) FAILED, Error = %ld\n", err); goto Exit; }
//now the duration...
double soundDuration;
UInt32 durationSize = sizeof(soundDuration);
err = AudioFileGetProperty(audioID, kAudioFilePropertyEstimatedDuration, &durationSize, &soundDuration);
if(err) { printf("MyGetOpenALAudioData: AudioFileGetProperty(kAudioFilePropertyEstimatedDuration) FAILED, Error = %ld\n", err); goto Exit; }
*duration = soundDuration;
//printf("Audio duration:%f secs.\n", soundDuration);
Exit:
// Dispose the ExtAudioFileRef, it is no longer needed
if (extRef) ExtAudioFileDispose(extRef);
return theData;
}
It is part of this sound engine: SoundEngine
I have tried to put my .caf file directly into the sample code, and it has the same small glitch. (This .caf file was doing fine with the old Apple SoundEngine.cpp, but I had other issues with that, so I decided to change.)
Answering my own question ;)
By pure luck, I must admit, I tried removing the kAudioFormatFlagIsPacked flag from this line:
theOutputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
and that fixed it.
If anybody can tell me why, that would be nice to know... or if there are any problems with removing that flag, I would also like to hear about it.