Open XML: Convert a Paragraph to simple HTML

I have a Paragraph object and wish to convert the inner text it contains into HTML fragments.
I use the Open XML SDK 2.0 from Microsoft.

[Test]
public void GetHTMLOutOfParagraphsWithoutHeadingInformation()
{
var paragraphs = new List<Paragraph>();
StyleDefinitionsPart styles = null;
// Open the file read-only since we don't need to change it.
using (var wordprocessingDocument = WordprocessingDocument.Open(documentFileName, false))
{
paragraphs = wordprocessingDocument.MainDocumentPart.Document.Body
.OfType<Paragraph>().ToList();
styles = wordprocessingDocument.MainDocumentPart.StyleDefinitionsPart;
foreach (var p in paragraphs)
{
using (var memoryStream = new MemoryStream())
{
using (var doc = WordprocessingDocument.Create(memoryStream, WordprocessingDocumentType.Document))
{
    doc.AddMainDocumentPart().AddPart(styles);
    doc.MainDocumentPart.Document = new Document();
    doc.MainDocumentPart.Document.Body = new Body();
    doc.MainDocumentPart.Document.Body.Append(p.CloneNode(true));
    doc.MainDocumentPart.Document.Save();
    Console.WriteLine(GetHTMLOfDoc(doc));
}
}
}
}
}
string GetHTMLOfDoc(WordprocessingDocument doc)
{
HtmlConverterSettings settings = new HtmlConverterSettings()
{
PageTitle = "Test Title",
CssClassPrefix = "Pt",
Css = "",
ConvertFormatting = false,
};
XElement html = HtmlConverter.ConvertToHtml(doc, settings);
var notNullAnyMore = html.XPathSelectElement("//*[local-name() = 'body']");
return notNullAnyMore.ToStringNewLineOnAttributes();
}
}
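If only the plain text of each paragraph is needed, the HtmlConverter round-trip can be skipped; a minimal sketch of that simpler alternative (it drops all run formatting, which is exactly what HtmlConverter is there to preserve):

// Alternative sketch: emit a bare <p> fragment from a paragraph's inner text.
// All run formatting is lost; use HtmlConverter when formatting matters.
static string ParagraphToHtml(Paragraph p)
{
    return "<p>" + System.Security.SecurityElement.Escape(p.InnerText) + "</p>";
}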

Related

Fill Picture content control in header of word doc using OpenXML

I want to fill a picture content control located in the header of my Word document with the code below. (The content control tag and the image data are passed to this function via the document parameter.)
public void FillDocument(Stream stream, XDocument document)
{
using (WordprocessingDocument wordDocument =
WordprocessingDocument.Open(stream, true))
{
List<SdtElement> descendants = wordDocument.MainDocumentPart.Document.Descendants<SdtElement>().ToList();
foreach (var headerPart in wordDocument.MainDocumentPart.HeaderParts)
{
descendants.AddRange(headerPart.Header.Descendants<SdtElement>().ToList());
}
foreach (var footerPart in wordDocument.MainDocumentPart.FooterParts)
{
descendants.AddRange(footerPart.Footer.Descendants<SdtElement>().ToList());
}
XDocument doc = document;
foreach (SdtElement item in descendants)
{
SdtAlias alias = item.Descendants<SdtAlias>().FirstOrDefault();
if (alias != null)
{
string sdtTitle = alias.Val.Value;
//if Sdt Content Control is Picture
string imageContent = (from xElement in doc.Descendants("Picture") where xElement.Attribute("Id").Value == sdtTitle select xElement.Value).FirstOrDefault();
if (imageContent != null)
{
MemoryStream result = (MemoryStream)StringToStream(imageContent);
SdtProperties p = item.Elements<SdtProperties>().FirstOrDefault();
if (p != null)
{
// Is it a picture content control?
SdtContentPicture pict = p.Elements<SdtContentPicture>().FirstOrDefault();
// Get the alias.
SdtAlias a = p.Elements<SdtAlias>().FirstOrDefault();
if (pict != null && a.Val.Value == sdtTitle)
{
string embed = null;
Drawing dr = item.Descendants<Drawing>().FirstOrDefault();
if (dr != null)
{
D.Blip blip = dr.Descendants<D.Blip>().FirstOrDefault();
if (blip != null)
embed = blip.Embed;
if (embed != null)
{
IdPartPair idpp = wordDocument.MainDocumentPart.Parts
.Where(pa => pa.RelationshipId == embed).FirstOrDefault();
if (idpp != null)
{
ImagePart ip = (ImagePart)idpp.OpenXmlPart;
ip.FeedData(result);
}
}
}
}
}
continue;
}
}
}
}
}
It finds the content control in the Word document, but on this line:
ImagePart ip = (ImagePart)idpp.OpenXmlPart;
I get this error:
Unable to cast object of type
‘DocumentFormat.OpenXml.Packaging.CustomXmlPart’ to type
‘DocumentFormat.OpenXml.Packaging.ImagePart’.
Could you please guide me?
I kept looking and found a way; here is the answer:
public void FillDocument(Stream stream, XDocument document)
{
using (WordprocessingDocument wordDocument =
WordprocessingDocument.Open(stream, true))
{
List<SdtElement> descendants = wordDocument.MainDocumentPart.Document.Descendants<SdtElement>().ToList();
foreach (HeaderPart headerPart in wordDocument.MainDocumentPart.HeaderParts)
{
descendants.AddRange(headerPart.Header.Descendants<SdtElement>().ToList());
}
foreach (var footerPart in wordDocument.MainDocumentPart.FooterParts)
{
descendants.AddRange(footerPart.Footer.Descendants<SdtElement>().ToList());
}
XDocument doc = document;
foreach (SdtElement item in descendants)
{
SdtAlias alias = item.Descendants<SdtAlias>().FirstOrDefault();
if (alias != null)
{
string sdtTitle = alias.Val.Value;
//if Sdt Content Control is Picture
string imageContent = (from xElement in doc.Descendants("Picture") where xElement.Attribute("Id").Value == sdtTitle select xElement.Value).FirstOrDefault();
if (imageContent != null)
{
MemoryStream result = (MemoryStream)StringToStream(imageContent);
D.Blip blipElement = item.Descendants<D.Blip>().FirstOrDefault();
string imageId = "default value";
if (blipElement != null)
{
imageId = blipElement.Embed.Value;
ImagePartType imagePartType = ImagePartType.Png;
// Add the image and update the embedded relationship id.
ImagePart imagePart = null;
Type p = item.Parent.GetType();
switch (p.Name)
{
case "Header":
HeaderPart headerPart = ((Header)(item.Parent)).HeaderPart;
imagePart = headerPart.AddImagePart(imagePartType);
imagePart.FeedData(result);
blipElement.Embed = headerPart.GetIdOfPart(imagePart);
break;
case "Body":
MainDocumentPart mainDocumentPart = wordDocument.MainDocumentPart;
imagePart = mainDocumentPart.AddImagePart(imagePartType);
imagePart.FeedData(result);
blipElement.Embed = mainDocumentPart.GetIdOfPart(imagePart);
break;
case "Footer":
FooterPart footerPart = ((Footer)(item.Parent)).FooterPart;
imagePart = footerPart.AddImagePart(imagePartType);
imagePart.FeedData(result);
blipElement.Embed = footerPart.GetIdOfPart(imagePart);
break;
default:
break;
}
}
continue;
}
}
}
}
}
It works fine and can fill the picture content control in the header, footer, or body!
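Note that the code relies on a StringToStream helper that is not shown above. A minimal sketch of such a helper, assuming the Picture element in the XDocument carries base64-encoded image bytes:

// Assumption: the Picture element's value is a base64-encoded image.
// If the content is encoded differently, adjust the decoding accordingly.
private static Stream StringToStream(string content)
{
    byte[] bytes = Convert.FromBase64String(content);
    return new MemoryStream(bytes);
}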

How to group the results of a Lucene.Net search?

I have managed to create documents and do some complex searching too, but I am facing a problem grouping some of the search results.
Books are displayed after a search, which is fine. Along with this, I need an author grouping with counts, based on the same search query.
Example:
Author Name | Count
A | 12
B | 2
I am using Lucene.Net 3.0.3.0, which does not support grouping, but there might be a workaround. I need the same feature for price ranges too.
Everything is possible if you write a custom Collector. What you describe are facets, and they can easily be computed by counting the document values yourself. The core part is calling the IndexSearcher.Search overload that accepts a collector. The collector should read the field values, usually through a field-cache implementation, and do the calculation needed.
This is a short demonstration using some classes from my demo-project Corelicious.Lucene.
var postTypes = new Dictionary<Int32, Int32>();
searcher.Search(query, new DelegatingCollector((reader, doc, scorer) => {
var score = scorer.Score();
if (score > 0) {
var postType = SingleFieldCache.Default.GetInt32(reader, "PostTypeId", doc);
if (postType.HasValue) {
if (postTypes.ContainsKey(postType.Value)) {
postTypes[postType.Value]++;
} else {
postTypes[postType.Value] = 1;
}
}
}
}));
Full code:
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.RegularExpressions;
using System.Xml;
using Corelicious.Lucene;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.QueryParsers;
using Lucene.Net.Search;
using Lucene.Net.Store;
using Directory = Lucene.Net.Store.Directory;
using Version = Lucene.Net.Util.Version;
namespace ConsoleApplication {
public static class Program {
public static void Main(string[] args) {
Console.WriteLine ("Creating directory...");
var directory = new RAMDirectory();
var analyzer = new StandardAnalyzer(Version.LUCENE_30);
CreateIndex(directory, analyzer);
var userQuery = "calculate pi";
var queryParser = new QueryParser(Version.LUCENE_30, "Body", analyzer);
var query = queryParser.Parse(userQuery);
Console.WriteLine("Query: '{0}'", query);
var indexReader = IndexReader.Open(directory, readOnly: true);
var searcher = new IndexSearcher(indexReader);
var postTypes = new Dictionary<Int32, Int32>();
searcher.Search(query, new DelegatingCollector((reader, doc, scorer) => {
var score = scorer.Score();
if (score > 0) {
var postType = SingleFieldCache.Default.GetInt32(reader, "PostTypeId", doc);
if (postType.HasValue) {
if (postTypes.ContainsKey(postType.Value)) {
postTypes[postType.Value]++;
} else {
postTypes[postType.Value] = 1;
}
}
}
}));
Console.WriteLine("Post type summary");
Console.WriteLine("Post type | Count");
foreach(var pair in postTypes.OrderByDescending(x => x.Value)) {
var postType = (PostType)pair.Key;
Console.WriteLine("{0,-10} | {1}", postType, pair.Value);
}
Console.ReadLine ();
}
public enum PostType {
Question = 1,
Answer = 2,
Tag = 4
}
public static void CreateIndex(Directory directory, Analyzer analyzer) {
using (var writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED))
using (var xmlStream = File.OpenRead("/Users/sisve/Downloads/Stack Exchange Data Dump - Sept 2011/Content/092011 Mathematics/posts.xml"))
using (var xmlReader = XmlReader.Create(xmlStream)) {
while (xmlReader.ReadToFollowing("row")) {
var tags = xmlReader.GetAttribute("Tags") ?? String.Empty;
var title = xmlReader.GetAttribute("Title") ?? String.Empty;
var body = xmlReader.GetAttribute("Body");
var doc = new Document();
// tags are stored as <tag1><tag2>
foreach (Match match in Regex.Matches(tags, "<(.*?)>")) {
doc.Add(new Field("Tags", match.Groups[1].Value, Field.Store.NO, Field.Index.NOT_ANALYZED));
}
doc.Add(new Field("Title", title, Field.Store.NO, Field.Index.ANALYZED));
doc.Add(new Field("Body", body, Field.Store.NO, Field.Index.ANALYZED));
doc.Add(new Field("PostTypeId", xmlReader.GetAttribute("PostTypeId"), Field.Store.NO, Field.Index.NOT_ANALYZED));
writer.AddDocument(doc);
}
writer.Optimize();
writer.Commit();
}
}
}
}
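If you prefer not to depend on the demo-project classes, here is a rough self-contained sketch of the same idea against the plain Lucene.Net 3.0.3 Collector API (the class name and the AuthorId field in the usage comment are illustrative, not part of Lucene.Net):

using System.Collections.Generic;
using Lucene.Net.Index;
using Lucene.Net.Search;

// Counts how often each value of a single string field occurs among the
// documents matched by a query. Values are read through Lucene's FieldCache.
public class FieldValueCountCollector : Collector
{
    private readonly string fieldName;
    private readonly Dictionary<string, int> counts = new Dictionary<string, int>();
    private string[] currentValues;

    public FieldValueCountCollector(string fieldName)
    {
        this.fieldName = fieldName;
    }

    public IDictionary<string, int> Counts
    {
        get { return counts; }
    }

    public override void SetScorer(Scorer scorer)
    {
        // Scores are not needed for plain counting.
    }

    public override void SetNextReader(IndexReader reader, int docBase)
    {
        // Cache the field values for the current index segment.
        currentValues = FieldCache_Fields.DEFAULT.GetStrings(reader, fieldName);
    }

    public override void Collect(int doc)
    {
        string value = currentValues[doc];
        if (value == null)
            return;

        int count;
        counts.TryGetValue(value, out count);
        counts[value] = count + 1;
    }

    public override bool AcceptsDocsOutOfOrder
    {
        get { return true; }
    }
}

// Usage (the field name is an assumption about your index):
//   var collector = new FieldValueCountCollector("AuthorId");
//   searcher.Search(query, collector);
//   // collector.Counts now maps each author id to its hit count.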

Tridion CoreService create component missing metadata

I get this exception: "Unable to find uuid:5708986b-390f-4728-b0c7-b49bd3d8f407:Metadata."
schemaId = UpdatePubId(schemaId, containerId);
SchemaData schemaData = (SchemaData)client.Read(schemaId, null);
string xml = string.Format("<{0} xmlns=\"{1}\">{2}</{0}>", schemaData.RootElementName, schemaData.NamespaceUri, fields);
ComponentData componentData = new ComponentData
{
Content = xml,
ComponentType = ComponentType.Normal,
Title = title,
Schema = new LinkToSchemaData { IdRef = schemaId },
LocationInfo = new LocationInfo { OrganizationalItem = new LinkToOrganizationalItemData { IdRef = containerId } },
Id = "tcm:0-0-0",
MetadataSchema = schemaData.MetadataSchema,
Metadata = schemaData.Metadata
};
try
{
componentData = client.Save(componentData, new ReadOptions()) as ComponentData;
componentData = client.CheckIn(componentData.Id, new ReadOptions()) as ComponentData;
message.Set("Component", title + ", successfully");
}
catch (Exception exception)
{
message.Set("Component", exception.Message);
}
Thanks, Tridion experts.
You are on the correct path, but the error indicates that you have not provided the Metadata fields for the component that you are trying to create.
This line is incorrect:
Metadata = schemaData.Metadata
It should be built pretty much like the content XML, the same way you create the content fields:
Metadata = String.Format("<Metadata xmlns=\"{0}\">{1}</Metadata>",schemaData.NamespaceUri, "YOUR METADATA XML")
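For example, assuming the metadata schema defines a single text field named "description" (the field name here is purely illustrative), the metadata could be built and assigned the same way as the content:

// Illustrative only: "description" must match a field defined in the
// component's metadata schema.
string metadataXml = string.Format(
    "<Metadata xmlns=\"{0}\"><description>{1}</description></Metadata>",
    schemaData.NamespaceUri,
    "My metadata value");

componentData.Metadata = metadataXml;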

Cannot add bullets for Word using OpenXml

My expected result is a bulleted list:
• Hello
• world!
but when I use the code below:
MainDocumentPart mainDocumentPart =
package.AddMainDocumentPart();
DocumentFormat.OpenXml.Wordprocessing.Document elementW =
new DocumentFormat.OpenXml.Wordprocessing.Document(
new Body(
new DocumentFormat.OpenXml.Wordprocessing.Paragraph(
new NumberingProperties(
new NumberingLevelReference() { Val = 0 },
new NumberingId() { Val = 1 })
),
new Run(
new RunProperties(),
new Text("Hello, ") { Space = new DocumentFormat.OpenXml.EnumValue<DocumentFormat.OpenXml.SpaceProcessingModeValues> { InnerText = "preserve" } })),
new DocumentFormat.OpenXml.Wordprocessing.Paragraph(
new ParagraphProperties(
new NumberingProperties(
new NumberingLevelReference() { Val = 0 },
new NumberingId() { Val = 1 })),
new Run(
new RunProperties(),
new Text("world!")
{
Space = new DocumentFormat.OpenXml.EnumValue<DocumentFormat.OpenXml.SpaceProcessingModeValues> { InnerText = "preserve" }
})));
elementW.Save(mainDocumentPart);
The result is two plain paragraphs without bullets:
Hello
world!
How can I get my expected result?
I realize this is far too late, but maybe it can help others with the same question. The marked answer (by amurra) doesn't actually achieve the desired result. It simply creates a document with the list as content, just more completely than yours. What you have added to the main document part is fine.
In the XML format, list items are defined as paragraphs with an indentation level and a numbering ID. This ID references the numbering rules defined in the NumberingDefinitionsPart of the document.
In your case, because you've set the numbering ID to be 1, the following code would map that ID of 1 to reflect a bulleted list as desired. Note the NumberingFormat and LevelText objects inside the Level object. These are the key components for your formatting.
NumberingDefinitionsPart numberingPart =
mainDocumentPart.AddNewPart<NumberingDefinitionsPart>("myCustomNumbering");
Numbering numElement = new Numbering(
new AbstractNum(
new Level(
new NumberingFormat() { Val = NumberFormatValues.Bullet },
new LevelText() { Val = "·" }
) { LevelIndex = 0 }
) { AbstractNumberId = 0 },
new NumberingInstance(
new AbstractNumId(){ Val = 0 }
){ NumberID = 1 }
);
numElement.Save(numberingPart);
For more information, check out the documentation for all the related classes on the Wordprocessing Namespace on MSDN, or the Working With Numbering markup article.
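Putting the two pieces together, a minimal end-to-end sketch might look like the following (the file path is illustrative, and the bullet glyph may render in the default font rather than the Symbol font Word normally uses for bullets):

using System;
using DocumentFormat.OpenXml;
using DocumentFormat.OpenXml.Packaging;
using DocumentFormat.OpenXml.Wordprocessing;

public static void CreateBulletedDocument(string path)
{
    // Build a new document whose two paragraphs reference numbering instance 1,
    // which in turn points at a bullet-formatted abstract numbering definition.
    using (var package = WordprocessingDocument.Create(path, WordprocessingDocumentType.Document))
    {
        MainDocumentPart mainPart = package.AddMainDocumentPart();

        var numberingPart = mainPart.AddNewPart<NumberingDefinitionsPart>();
        new Numbering(
            new AbstractNum(
                new Level(
                    new NumberingFormat() { Val = NumberFormatValues.Bullet },
                    new LevelText() { Val = "·" }
                ) { LevelIndex = 0 }
            ) { AbstractNumberId = 0 },
            new NumberingInstance(
                new AbstractNumId() { Val = 0 }
            ) { NumberID = 1 }
        ).Save(numberingPart);

        // Each list item is a paragraph whose NumberingId points at instance 1.
        Func<string, Paragraph> listItem = text =>
            new Paragraph(
                new ParagraphProperties(
                    new NumberingProperties(
                        new NumberingLevelReference() { Val = 0 },
                        new NumberingId() { Val = 1 })),
                new Run(new Text(text)));

        mainPart.Document = new Document(new Body(listItem("Hello, "), listItem("world!")));
        mainPart.Document.Save();
    }
}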
This should create a blank document with your expected output:
// Creates an Document instance and adds its children.
public Document GenerateDocument()
{
Document document1 = new Document();
document1.AddNamespaceDeclaration("ve", "http://schemas.openxmlformats.org/markup-compatibility/2006");
document1.AddNamespaceDeclaration("o", "urn:schemas-microsoft-com:office:office");
document1.AddNamespaceDeclaration("r", "http://schemas.openxmlformats.org/officeDocument/2006/relationships");
document1.AddNamespaceDeclaration("m", "http://schemas.openxmlformats.org/officeDocument/2006/math");
document1.AddNamespaceDeclaration("v", "urn:schemas-microsoft-com:vml");
document1.AddNamespaceDeclaration("wp", "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing");
document1.AddNamespaceDeclaration("w10", "urn:schemas-microsoft-com:office:word");
document1.AddNamespaceDeclaration("w", "http://schemas.openxmlformats.org/wordprocessingml/2006/main");
document1.AddNamespaceDeclaration("wne", "http://schemas.microsoft.com/office/word/2006/wordml");
Body body1 = new Body();
Paragraph paragraph1 = new Paragraph(){ RsidParagraphAddition = "00AF4948", RsidParagraphProperties = "00625634", RsidRunAdditionDefault = "00625634" };
ParagraphProperties paragraphProperties1 = new ParagraphProperties();
ParagraphStyleId paragraphStyleId1 = new ParagraphStyleId(){ Val = "ListParagraph" };
NumberingProperties numberingProperties1 = new NumberingProperties();
NumberingLevelReference numberingLevelReference1 = new NumberingLevelReference(){ Val = 0 };
NumberingId numberingId1 = new NumberingId(){ Val = 1 };
numberingProperties1.Append(numberingLevelReference1);
numberingProperties1.Append(numberingId1);
paragraphProperties1.Append(paragraphStyleId1);
paragraphProperties1.Append(numberingProperties1);
Run run1 = new Run();
Text text1 = new Text();
text1.Text = "Hello";
run1.Append(text1);
paragraph1.Append(paragraphProperties1);
paragraph1.Append(run1);
Paragraph paragraph2 = new Paragraph(){ RsidParagraphAddition = "00625634", RsidParagraphProperties = "00625634", RsidRunAdditionDefault = "00625634" };
ParagraphProperties paragraphProperties2 = new ParagraphProperties();
ParagraphStyleId paragraphStyleId2 = new ParagraphStyleId(){ Val = "ListParagraph" };
NumberingProperties numberingProperties2 = new NumberingProperties();
NumberingLevelReference numberingLevelReference2 = new NumberingLevelReference(){ Val = 0 };
NumberingId numberingId2 = new NumberingId(){ Val = 1 };
numberingProperties2.Append(numberingLevelReference2);
numberingProperties2.Append(numberingId2);
paragraphProperties2.Append(paragraphStyleId2);
paragraphProperties2.Append(numberingProperties2);
Run run2 = new Run();
Text text2 = new Text();
text2.Text = "world!";
run2.Append(text2);
paragraph2.Append(paragraphProperties2);
paragraph2.Append(run2);
SectionProperties sectionProperties1 = new SectionProperties(){ RsidR = "00625634", RsidSect = "00AF4948" };
HeaderReference headerReference1 = new HeaderReference(){ Type = HeaderFooterValues.Even, Id = "rId7" };
HeaderReference headerReference2 = new HeaderReference(){ Type = HeaderFooterValues.Default, Id = "rId8" };
FooterReference footerReference1 = new FooterReference(){ Type = HeaderFooterValues.Even, Id = "rId9" };
FooterReference footerReference2 = new FooterReference(){ Type = HeaderFooterValues.Default, Id = "rId10" };
HeaderReference headerReference3 = new HeaderReference(){ Type = HeaderFooterValues.First, Id = "rId11" };
FooterReference footerReference3 = new FooterReference(){ Type = HeaderFooterValues.First, Id = "rId12" };
PageSize pageSize1 = new PageSize(){ Width = (UInt32Value)12240U, Height = (UInt32Value)15840U };
PageMargin pageMargin1 = new PageMargin(){ Top = 1440, Right = (UInt32Value)1440U, Bottom = 1440, Left = (UInt32Value)1440U, Header = (UInt32Value)720U, Footer = (UInt32Value)720U, Gutter = (UInt32Value)0U };
Columns columns1 = new Columns(){ Space = "720" };
DocGrid docGrid1 = new DocGrid(){ LinePitch = 360 };
sectionProperties1.Append(headerReference1);
sectionProperties1.Append(headerReference2);
sectionProperties1.Append(footerReference1);
sectionProperties1.Append(footerReference2);
sectionProperties1.Append(headerReference3);
sectionProperties1.Append(footerReference3);
sectionProperties1.Append(pageSize1);
sectionProperties1.Append(pageMargin1);
sectionProperties1.Append(columns1);
sectionProperties1.Append(docGrid1);
body1.Append(paragraph1);
body1.Append(paragraph2);
body1.Append(sectionProperties1);
document1.Append(body1);
return document1;
}

Multiple file upload using HTML5 drag-and-drop fails as multiple files get the same content

I need to transfer all the files dropped on an element to a server using HTML5 drag and drop.
The corresponding JS code is below. I have a servlet on the server side that collects the files and puts them in a folder. This works fine if I drop 1 or 2 files on the page. But if I drop 4-10 files, all the files get created on the server, yet several of them end up with the same content and some have a content size of 0 KB.
Can anyone please tell me how to achieve the correct behavior?
My requirement is similar to Gmail attachments.
Any solution that uploads the files sequentially would be much appreciated.
/*
* Upload files to the server using HTML 5 Drag and drop from the folders on your local computer
*/
function uploader(place, status, target, show) {
// Upload image files
upload = function(file) {
// Firefox 3.6, Chrome 6, WebKit
if(window.FileReader) {
// Once the process of reading file
this.loadEnd = function() {
bin = reader.result;
xhr = new XMLHttpRequest();
xhr.open('POST', target+'?up=true', false);
var body = bin;
xhr.setRequestHeader('content-type', 'multipart/form-data;');
xhr.setRequestHeader("file-name", file.name );
xhr.setRequestHeader("mime-type", file.type );
// Firefox 3.6 provides a feature sendAsBinary ()
if(xhr.sendAsBinary != null) {
xhr.sendAsBinary(body);
// Chrome 7 sends data but you must use the base64_decode on the PHP side
} else {
xhr.open('POST', target+'?up=true&base64=true', true);
xhr.setRequestHeader('UP-FILENAME', file.name);
xhr.setRequestHeader('UP-SIZE', file.size);
xhr.setRequestHeader('UP-TYPE', file.type);
xhr.send(window.btoa(bin));
}
if (show) {
var newFile = document.createElement('div');
newFile.innerHTML = 'Loaded : '+file.name+' size '+file.size+' B';
document.getElementById(show).appendChild(newFile);
}
if (status) {
document.getElementById(status).innerHTML = 'Loaded : 100%<br/>Next file ...';
}
};
// Loading errors
this.loadError = function(event) {
switch(event.target.error.code) {
case event.target.error.NOT_FOUND_ERR:
document.getElementById(status).innerHTML = 'File not found!';
break;
case event.target.error.NOT_READABLE_ERR:
document.getElementById(status).innerHTML = 'File not readable!';
break;
case event.target.error.ABORT_ERR:
break;
default:
document.getElementById(status).innerHTML = 'Read error.';
}
};
// Reading Progress
this.loadProgress = function(event) {
if (event.lengthComputable) {
var percentage = Math.round((event.loaded * 100) / event.total);
document.getElementById(status).innerHTML = 'Loaded : '+percentage+'%';
}
};
// Preview images
this.previewNow = function(event) {
bin = preview.result;
var img = document.createElement("img");
img.className = 'addedIMG';
img.file = file;
img.src = bin;
document.getElementById(show).appendChild(img);
};
reader = new FileReader();
// Firefox 3.6, WebKit
if(reader.addEventListener) {
reader.addEventListener('loadend', this.loadEnd, false);
if (status != null)
{
reader.addEventListener('error', this.loadError, false);
reader.addEventListener('progress', this.loadProgress, false);
}
// Chrome 7
} else {
reader.onloadend = this.loadEnd;
if (status != null)
{
reader.onerror = this.loadError;
reader.onprogress = this.loadProgress;
}
}
var preview = new FileReader();
// Firefox 3.6, WebKit
if(preview.addEventListener) {
preview.addEventListener('loadend', this.previewNow, false);
// Chrome 7
} else {
preview.onloadend = this.previewNow;
}
// The function that starts reading the file as a binary string
reader.readAsBinaryString(file);
// Preview uploaded files
if (show) {
preview.readAsDataURL(file);
}
// Safari 5 does not support FileReader
} else {
xhr = new XMLHttpRequest();
xhr.open('POST', target+'?up=true', true);
xhr.setRequestHeader('UP-FILENAME', file.name);
xhr.setRequestHeader('UP-SIZE', file.size);
xhr.setRequestHeader('UP-TYPE', file.type);
xhr.send(file);
if (status) {
document.getElementById(status).innerHTML = 'Loaded : 100%';
}
if (show) {
var newFile = document.createElement('div');
newFile.innerHTML = 'Loaded : '+file.name+' size '+file.size+' B';
document.getElementById(show).appendChild(newFile);
}
}
};
// Function drop file
this.drop = function(event) {
event.preventDefault();
var dt = event.dataTransfer;
var files = dt.files;
for (var i = 0; i<files.length; i++) {
var file = files[i];
upload(file);
}
};
// The inclusion of the event listeners (DragOver and drop)
this.uploadPlace = document.getElementById(place);
this.uploadPlace.addEventListener("dragover", function(event) {
event.stopPropagation();
event.preventDefault();
}, true);
this.uploadPlace.addEventListener("drop", this.drop, false);
}
Thank you.
I spent some time this morning analyzing the same code from html5uploader. With some luck, I found the root cause.
Change reader = new FileReader(); to var reader = new FileReader(); and that should solve the issue.
I suspect this is due to JavaScript's behaviour of auto-declaring undeclared variables as globals. That caused the single reader variable to be shared by all the upload(file) calls when more than one file is dropped onto the browser.
Cheers!