I have a MEAN app with lots of dates stored in MongoDB. The clocks change in the UK on 27th October, so all of the dates stored in the db need to have one hour added.
I don't want to loop through every doc in the db and add an hour to its dates; I'd prefer this to be dynamic, so I'm trying to implement a query hook that checks each date on the incoming docs against the timezone offset attached to the doc and adds/subtracts the difference.
The problem is that the loop over the incoming docs needs to dynamically identify where the dates are, which will sometimes be on the root of the doc, sometimes buried inside an object or an array, and I'm having a hard time modelling the loop to cover all of that.
I'm using functions to check whether the incoming value is a date, an object or an array, but mongoose attaches a bunch of internal objects/functions that get in the way, so I'm getting RangeError: Maximum call stack size exceeded.
const config = require('../config/environment');
// Should check whether the offset is the same as the one stored on each
// document in the db; if it doesn't match, I'll subtract the offset.
var UK_TIMEZONE_GMT_OFFSET = config.UK_TIMEZONE_GMT_OFFSET || 0;
const mongoose = require('mongoose');
const exec = mongoose.Query.prototype.exec;
const Q = require('q'); // the npm package name is lowercase

mongoose.Query.prototype.exec = function () {
    var d = Q.defer();
    var p = exec.apply(this, arguments);
    if (p) p.then(function (rs) {
        var mod;
        try {
            mod = fixDates(rs);
        } catch (err) {
            console.log(err, rs);
        }
        d.resolve(mod);
    }, d.reject);
    return d.promise;
};
function fixDates(rs) {
    if (isArray(rs)) {
        rs.forEach(function (r, i) {
            rs[i] = fixDates(r);
        });
    } else if (isObject(rs)) {
        for (var key in rs) {
            var val = rs[key];
            if (isObject(val)) console.log('isobject', isObject(val), val);
            // The '_id' of the document is considered an object, and so are
            // mongoose internals such as Schema, NativeCollection, ...
            // rs[key] = fixDates(val); // this line causes the problem
        }
    } else if (isDate(rs)) {
        // Modify the date if necessary: check the timezone offset of the document
        // vs the global timezone offset of the system, then add/subtract the difference.
    }
    return rs;
}
function isDate(obj) {
return obj instanceof Date;
}
function isObject(obj) {
return Object.prototype.toString.call(obj) === '[object Object]';
}
function isArray(obj) {
return Array.isArray(obj);
}
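As the comments in fixDates note, values such as the document's _id pass the isObject check, which drags the recursion into mongoose/BSON internals. A hedged extra guard could short-circuit those leaf types before recursing; this is only a sketch of one possible check, assuming mongoose is already required above, not a confirmed fix:

// Sketch: treat BSON leaf types as plain values rather than objects to recurse into.
function isLeafValue(obj) {
    return obj instanceof mongoose.Types.ObjectId ||
        Buffer.isBuffer(obj) ||
        obj instanceof Date;
}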
I need a better methodology for dealing with the objects mongoose returns, so that I can loop through all documents, deeply find the dates and modify them.
Update: I changed the recursive function to invoke model.toObject() if it exists, fix the dates on the resulting plain object, and then apply the updated date fields back onto the model, so the mongoose model class is preserved.
function recur(rs, docTimezone) {
    if (isArray(rs)) {
        rs.forEach(function (r, i) {
            rs[i] = recur(r, docTimezone);
        });
    } else if (isObject(rs)) {
        if (rs.toObject) {
            return updateModel(rs, recur(rs.toObject(), docTimezone));
        }
        Object.keys(rs).forEach(function (key) {
            var val = rs[key];
            // use usedTimezoneGMTOffset if it exists on the doc
            rs[key] = recur(val, docTimezone !== undefined ? docTimezone : rs.usedTimezoneGMTOffset);
        });
    } else if (isDate(rs)) {
        // modify the date if necessary
        rs = alignTimezone(rs, docTimezone);
    }
    return rs;
}

function updateModel(model, updates) {
    var key, val;
    for (key in updates) {
        val = updates[key];
        if (val !== void 0 && key !== '_id') {
            model[key] = val;
        }
    }
    return model;
}
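The recur function above calls alignTimezone, which isn't shown. A minimal sketch of what it could look like, assuming usedTimezoneGMTOffset is stored on each document as a GMT offset in hours and UK_TIMEZONE_GMT_OFFSET (from the config at the top) is the system's current offset; the arithmetic here is an assumption, not the original implementation:

// Shift a date by the difference between the offset the document was saved
// with and the offset the system currently uses.
function alignTimezone(date, docOffsetHours) {
    if (docOffsetHours === undefined) return date; // nothing recorded on the doc
    var diffHours = UK_TIMEZONE_GMT_OFFSET - docOffsetHours;
    if (diffHours === 0) return date;
    return new Date(date.getTime() + diffHours * 60 * 60 * 1000);
}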
I am trying to push the result of a count into an array inside a MongoDB query. While pushing, it shows the array, but if I print it outside of the query it shows an empty array.
collection1 in the db looks like this:
[
    { title: 'Home', date: '24-10-2016' },
    { title: 'Accesories', date: '13-02-2016' }
]
My code:
exports.listOfCategories = function (req, res) {
    collection1.find().exec(function (err, categories) {
        if (err) {
            return res.status(400).send({
                message: errorHandler.getErrorMessage(err)
            });
        } else {
            var categoryList = categories;
            var catTitle;
            var allCat = [];
            // console.log(categoryList);
            for (var i = 0; i < categoryList.length; i++) {
                catTitle = categoryList[i].title;
                contentCounts(catTitle);

                function contentCounts(content, callback) {
                    var catName = new RegExp(content, 'i');
                    var mongoQuery = {
                        "ProCategory.title": catName
                    };
                    collection2.find(mongoQuery).count(function (err, count) {
                        generateContentArr(content, count);
                    });
                }

                function generateContentArr(content, count) {
                    allCat.push({
                        name: content,
                        count: count
                    });
                    console.log(JSON.stringify(allCat));
                    // Here it shows the array that was pushed
                }
            }
            console.log(JSON.stringify(allCat));
            // Here it does not show the full array; it shows an empty array
            res.json(allCat);
        }
    });
}
Thanks in advance
You are not waiting for the results of the async operations. Inside the for loop you need to wait for each mongo operation to finish, but because the for loop is synchronous you just fire off the calls to mongo, never wait for the results, and print the empty array right after the loop.
I would suggest using promises instead of callbacks. I don't know which version of mongoose you are using, but recent versions have promise support for methods like find and count. Here is an example for your case:
var Promise = require("bluebird");

function countByTitle(catTitle) {
    var mongoQuery = { "ProCategory.title": new RegExp(catTitle, 'i') };
    return collection2.count(mongoQuery).then(function (count) {
        return {
            name: catTitle,
            count: count
        };
    });
}

collection1.find().then(function (categories) {
    var categoryList = categories;
    var promises = [];
    for (var i = 0; i < categoryList.length; i++) {
        promises.push(countByTitle(categoryList[i].title));
    }
    return Promise.all(promises).then(function (results) {
        console.log(JSON.stringify(results));
        // send the aggregated counts back to the client
        res.json(results);
    });
}).catch(function (err) {
    // if there is any error while resolving the promises, this block will be called
    return res.status(400).send({
        message: errorHandler.getErrorMessage(err)
    });
});
I have created a task pane add-in for Word that runs a search and displays information about the results as a list to the user.
When the user clicks on an item in the list I want to select the range in Word to show the user the location of the item.
The add-in will then allow the user to perform additional tasks on the range, for example changing the font colour.
I am able to run the search and get the ranges for display using the function below:
function runSearch(textToFind) {
    var items = [];
    return Word.run(function (context) {
        var options = Word.SearchOptions.newObject(context);
        options.matchWildCards = false;
        var rangesFind = context.document.body.search(textToFind, options);
        context.load(rangesFind, 'text, font, style');
        return context.sync().then(function () {
            for (var i = 0; i < rangesFind.items.length; i++) {
                items.push(rangesFind.items[i]);
                context.trackedObjects.add(rangesFind.items[i]);
            }
            return context.sync();
        });
    })
    .then(function () {
        return items;
    });
}
However, I am having difficulty selecting the range on user click.
I have tried using the range's context:

function selectRange(range) {
    range.select();
    return range.context.sync();
}
Or using the range in a new Word.run context:

function selectRange(range) {
    return Word.run(function (context) {
        context.load(range);
        return context.sync().then(function () {
            range.select();
            return context.sync();
        });
    });
}
I have come across a potential method that involves creating a content control for each search result, then reloading all the content controls in the selectRange function in the new context and finding the matching control, but that seems very inefficient when I already have the range.
What is the best method for reusing a range across different Word.run contexts?
You cannot use an object across Word.run invocations. Word.run creates a new context every time that it's invoked, whereas the original object is tied to its own context, creating a mismatch.
That being said, you absolutely can, from within a Word.run, add the objects you desire to context.trackedObjects.add(obj), and they will remain as working objects even after Word.run finishes executing. By "working objects" I mean that their path will not get invalidated (think something similar to garbage collection, but for remote objects).
Once you have such an object (and from the code above it looks like you do), you should be able to call
range.select();
range.context.sync().catch(...);
If it's not working for you, can you provide an example of the error you're getting?
For completeness sake, I should note that once you add objects to the trackedObjects collection, you're effectively taking memory management of those objects into your own hands. This means that if you don't properly release the memory, you will be slowing down Word by bogging down its memory / range-adjustment chain. So once you're done using the tracked object(s), you should call obj.context.trackedObjects.remove(obj), followed by obj.context.sync(). Don't forget the last part - if you don't do a sync, your request to remove the tracked objects will not be dispatched, and you'll continue to use up the memory.
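For illustration, the release pattern described above looks like this for a tracked range (a sketch only; range stands for whichever object was added to trackedObjects):

// Release a tracked object once it is no longer needed.
range.context.trackedObjects.remove(range);
range.context.sync(); // without this sync, the removal request is never dispatched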
======= Update 1 =======
Tom, thanks for providing the error message. It looks like this might be a bug in the Word implementation of the APIs -- I'll follow up on that, and someone might reach out to you if there's more questions.
From a conceptual standpoint, you are absolutely on the right path -- and the following does work in Excel, for example:
var range;
Excel.run(function (ctx) {
    var sheet = ctx.workbook.worksheets.getActiveWorksheet();
    range = sheet.getRange("A5");
    range.values = [[5]];
    ctx.trackedObjects.add(range);
    return ctx.sync();
})
.then(function () {
    setTimeout(function () {
        range.select();
        range.context.trackedObjects.remove(range);
        range.context.sync();
    }, 2000);
})
.catch(function (error) {
    showMessage("Error: " + error);
});
======= Update 2 =======
It turns out there is indeed a bug in the product. However, the good news is that it's easy to fix with a JavaScript-only fix, and in fact we'll do so in the next couple of weeks, updating the CDN.
With the fix, the following code works:
var paragraph;
Word.run(function (ctx) {
    var p = ctx.document.body.paragraphs.first;
    paragraph = p.next;
    ctx.trackedObjects.add(paragraph);
    return ctx.sync();
})
.then(function () {
    setTimeout(function () {
        paragraph.select();
        paragraph.context.trackedObjects.remove(paragraph);
        paragraph.context.sync()
            .then(function () {
                console.log("Done");
            })
            .catch(handleError);
    }, 2000);
})
.catch(handleError);

function handleError(error) {
    console.log('Error: ' + JSON.stringify(error));
    if (error instanceof OfficeExtension.Error) {
        console.log('Debug info: ' + JSON.stringify(error.debugInfo));
    }
}
Want even better news? Until the CDN is updated, you can use the code below to "patch" the JavaScript library and make the code above run. You should run this code some time after Office.js has already loaded (i.e., within your Office.initialize function), and before you do a Word.run.
var TrackedObjects = (function () {
    function TrackedObjects(context) {
        this._autoCleanupList = {};
        this.m_context = context;
    }
    TrackedObjects.prototype.add = function (param) {
        var _this = this;
        if (Array.isArray(param)) {
            param.forEach(function (item) { return _this._addCommon(item, true); });
        }
        else {
            this._addCommon(param, true);
        }
    };
    TrackedObjects.prototype._autoAdd = function (object) {
        this._addCommon(object, false);
        this._autoCleanupList[object._objectPath.objectPathInfo.Id] = object;
    };
    TrackedObjects.prototype._addCommon = function (object, isExplicitlyAdded) {
        if (object[OfficeExtension.Constants.isTracked]) {
            if (isExplicitlyAdded && this.m_context._autoCleanup) {
                delete this._autoCleanupList[object._objectPath.objectPathInfo.Id];
            }
            return;
        }
        var referenceId = object[OfficeExtension.Constants.referenceId];
        if (OfficeExtension.Utility.isNullOrEmptyString(referenceId) && object._KeepReference) {
            object._KeepReference();
            OfficeExtension.ActionFactory.createInstantiateAction(this.m_context, object);
            if (isExplicitlyAdded && this.m_context._autoCleanup) {
                delete this._autoCleanupList[object._objectPath.objectPathInfo.Id];
            }
            object[OfficeExtension.Constants.isTracked] = true;
        }
    };
    TrackedObjects.prototype.remove = function (param) {
        var _this = this;
        if (Array.isArray(param)) {
            param.forEach(function (item) { return _this._removeCommon(item); });
        }
        else {
            this._removeCommon(param);
        }
    };
    TrackedObjects.prototype._removeCommon = function (object) {
        var referenceId = object[OfficeExtension.Constants.referenceId];
        if (!OfficeExtension.Utility.isNullOrEmptyString(referenceId)) {
            var rootObject = this.m_context._rootObject;
            if (rootObject._RemoveReference) {
                rootObject._RemoveReference(referenceId);
            }
            delete object[OfficeExtension.Constants.isTracked];
        }
    };
    TrackedObjects.prototype._retrieveAndClearAutoCleanupList = function () {
        var list = this._autoCleanupList;
        this._autoCleanupList = {};
        return list;
    };
    return TrackedObjects;
}());
OfficeExtension.TrackedObjects = TrackedObjects;
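As noted above, the patch has to run after Office.js has loaded and before the first Word.run. For illustration, one place to put it, assuming the snippet above is wrapped in a function named applyTrackedObjectsPatch (that wrapper name is hypothetical):

Office.initialize = function (reason) {
    // Patch OfficeExtension.TrackedObjects before any Word.run call is made.
    applyTrackedObjectsPatch();
};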
Hope this helps!
~ Michael Zlatkovsky, developer on Office Extensibility team, MSFT
In addition to the TrackedObjects fix, the runSearch method needed updating to get the range of each search result rather than using the search result directly.
function runSearch(textToFind) {
    var items = [];
    return Word.run(function (context) {
        var options = Word.SearchOptions.newObject(context);
        options.matchWildCards = false;
        var rangesFind = context.document.body.search(textToFind, options);
        context.load(rangesFind);
        return context.sync().then(function () {
            for (var i = 0; i < rangesFind.items.length; i++) {
                var range = rangesFind.items[i].getRange();
                context.load(range, 'text');
                items.push(range);
                context.trackedObjects.add(items[items.length - 1]);
            }
            return context.sync();
        });
    })
    .then(function () {
        return items;
    });
}
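For illustration, here is how the pieces could fit together when a list item is clicked, using the tracked ranges returned by runSearch above; onResultClicked and releaseRanges are hypothetical names, and releasing the tracked objects follows the cleanup advice given earlier:

// Select a previously tracked range when its list item is clicked.
function onResultClicked(range) {
    range.select();
    return range.context.sync().catch(function (error) {
        console.log('Error: ' + JSON.stringify(error));
    });
}

// Release all tracked ranges once the result list is no longer needed.
function releaseRanges(items) {
    if (!items.length) return;
    var context = items[0].context;
    context.trackedObjects.remove(items); // remove() accepts an array
    return context.sync();
}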
I have MongoDB with three huge collections, say 'A', 'B' and 'C'.
Each collection contains about 2 million documents.
Each document has certain properties.
Each document needs to be updated based on the values of those properties, from which I can determine what the '$set' for that document should be.
Currently I am using the same approach for each collection:
find all documents in batches, collect them in memory (which I think is the culprit in the current approach), then update them one by one.
For the first collection (which has similar data to the others) it takes 10 minutes to complete; the next two collections take approximately 2 hours to finish, or the MongoDB client crashes before that.
Something is clearly wrong and undesirable in the current approach.
Model.collection.find({}).batchSize(BATCH).toArray(function (err, docs) {
    if (err || !docs || !docs.length)
        return afterCompleteOneCollection(err);

    var spec = function (index) {
        if (index % 1000 === 0) console.log('at index : ' + index);

        var toSet = {};
        var toUnset = {};

        var over = function () {
            var afterOver = function (err) {
                if (err) return afterCompleteOneCollection(err);
                if (index < docs.length - 1) spec(index + 1);
                else afterCompleteOneCollection(null);
            };

            var sb = Object.keys(toSet).length;
            var ub = Object.keys(toUnset).length;
            if (sb || ub) {
                var all = {};
                if (sb) all.$set = toSet;
                if (ub) all.$unset = toUnset;
                Model.collection.update({ _id: docs[index]._id }, all, {}, afterOver);
            } else afterOver(null);
        };

        forEachOfDocument(docs[index], toSet, toUnset, over);
    };
    spec(0);
});
Is there any better solution for this?
The streaming approach from http://mongodb.github.io/node-mongodb-native/api-generated/cursor.html#stream worked for me.
This is what I am doing:
var stream = Model.collection.find().stream();

stream.on('data', function (data) {
    if (data) {
        var toSet = {};
        var toUnset = {};

        var over = function () {
            var afterOver = function (err) {
                if (err) console.log(err);
            };

            var sb = Object.keys(toSet).length;
            var ub = Object.keys(toUnset).length;
            if (sb || ub) {
                var all = {};
                if (sb) all.$set = toSet;
                if (ub) all.$unset = toUnset;
                Model.collection.update({ _id: data._id }, all, {}, afterOver);
            } else afterOver(null);
        };

        forEachOfDocument(data, toSet, toUnset, over);
    }
});

stream.on('close', function () {
    afterCompleteOneCollection();
});
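One refinement worth considering, not part of the original answer: the cursor stream is a plain readable stream, so it can be paused while each update is in flight and resumed afterwards, which keeps millions of pending writes from piling up. A minimal sketch built on the code above, where the pause/resume placement is the only change:

var stream = Model.collection.find().stream();

stream.on('data', function (data) {
    if (!data) return;
    stream.pause(); // stop reading until this document's update has finished

    var toSet = {};
    var toUnset = {};

    var over = function () {
        var afterOver = function (err) {
            if (err) console.log(err);
            stream.resume(); // move on to the next document
        };

        var sb = Object.keys(toSet).length;
        var ub = Object.keys(toUnset).length;
        if (sb || ub) {
            var all = {};
            if (sb) all.$set = toSet;
            if (ub) all.$unset = toUnset;
            Model.collection.update({ _id: data._id }, all, {}, afterOver);
        } else afterOver(null);
    };

    forEachOfDocument(data, toSet, toUnset, over);
});

stream.on('close', function () {
    afterCompleteOneCollection();
});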
I've been playing with Lucene.NET for the last two days.
After reading up on dates, I was led to believe that dates are best converted to milliseconds and stored in a NumericField, with Indexing=true and Store=No.
But now nothing ever returns; it must be something basic, but I'm just not seeing it.
The saving code is as follows:
...
else if (type == typeof (DateTime?))
{
    var typedValue = (DateTime?) value;
    field = numericField = new NumericField(documentFieldName, 4, Field.Store.YES, true);
    long milliseconds = typedValue.HasValue ? (typedValue.Value.Date.Ticks / TimeSpan.TicksPerMillisecond) : 0;
    numericField.SetLongValue(milliseconds);
    doc.Add(numericField);
}
...
else
{
    field = stringField = new Field(
        documentFieldName,
        (value != null) ? value.ToString() : string.Empty,
        Store.YES,
        Field.Index.ANALYZED);
    doc.Add(stringField);
}

// Write the Document to the catalog
indexWriter.AddDocument(doc);
When I query for docs against the values saved in Field ... no problem.
When I query for documents by matching against the values in NumericFields, nothing returns.
Where did I go wrong?
Thanks for your help.
Lucene.Net.Analysis.Analyzer analyzer = new Lucene.Net.Analysis.Standard.StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);
var q2 = NumericRangeQuery.NewLongRange("Val", 3, 3, true, true);
var uxy2 = documentSearchManagementService.Search("Students", termQuery, "Id");
Using:
public ScoredDocumentResult[] Search(string indexName, Query query, params string[] hitFieldNamesToReturn)
{
    if (_configuration.IndexRootDirectory.IsNullOrEmpty())
    {
        throw new Exception("Configuration.IndexRootDirectory has not been configured yet.");
    }
    indexName.ValidateIsNotNullOrEmpty("indexName");
    hitFieldNamesToReturn.ValidateIsNotDefault("hitFieldNamesToReturn");

    // Specify the index file location where the indexes are to be stored
    string indexFileLocation = Path.Combine(_configuration.IndexRootDirectory, indexName);
    Lucene.Net.Store.Directory luceneDirectory = Lucene.Net.Store.FSDirectory.Open(indexFileLocation);

    IndexSearcher indexSearcher = new IndexSearcher(luceneDirectory);
    TopScoreDocCollector topScoreDocCollector = TopScoreDocCollector.Create(10, true);
    indexSearcher.Search(query, topScoreDocCollector);

    List<ScoredDocumentResult> results = new List<ScoredDocumentResult>();
    foreach (var scoreDoc in topScoreDocCollector.TopDocs(0, 10).ScoreDocs)
    {
        ScoredDocumentResult resultItem = new ScoredDocumentResult();
        Lucene.Net.Documents.Document doc = indexSearcher.Doc(scoreDoc.Doc);
        resultItem.Score = scoreDoc.Score;

        List<ScoredDocumentFieldResult> fields = new List<ScoredDocumentFieldResult>();
        foreach (string fieldName in hitFieldNamesToReturn)
        {
            string fieldValue = doc.Get(fieldName);
            fields.Add(new ScoredDocumentFieldResult { Key = fieldName, Value = fieldValue });
        }
        resultItem.FieldValues = fields.ToArray();
        results.Add(resultItem);
    }
    indexSearcher.Close();
    return results.ToArray();
}
Hello, all I'm trying to do is get the count of each distinct departmentType:
fnMap = function () {
    emit(this.departments.departmentType, { typeCount: 1 });
};

fnReduce = function (key, values) {
    var result = { typeCount: 0 };
    values.forEach(function (value) {
        result.typeCount += value.brandCount;
    });
    return result;
};

var command = {
    mapreduce: "clients",
    query: { "departments.departmentType": { $exists: true } },
    map: fnMap.toString(),
    reduce: fnReduce.toString(),
    //sort: {"departments.departmentType":1},
    out: { inline: 1 }
};

mongoose.connection.db.executeDbCommand(command, function (err, dbres) {
});
When executing the command, dbres.documents[0].results only contains one item with the total number of departmentTypes, instead of several items, one for each departmentType with its count.
Any ideas what I am doing wrong?
Also, when I uncomment the sort line, I get the error "db assertion failure: could not create cursor over...", and I believe the field name is written correctly.
Mongoose v3 now has a Model.mapReduce() function (see the docs).
The full example shown is:
var o = {};
o.map = function () { emit(this.name, 1) }
o.reduce = function (k, vals) { return vals.length }
o.out = { replace: 'createdCollectionNameForResults' }
o.verbose = true;

User.mapReduce(o, function (err, model, stats) {
    console.log('map reduce took %d ms', stats.processtime);
    model.find().where('value').gt(10).exec(function (err, docs) {
        console.log(docs);
    });
});
The problem with the count, I believe, is that in your fnReduce() function you are summing the results instead of displaying them in an array.
You can use:
db.clients.distinct("departments.departmentType")
That will give an array with all the distinct departmentType values.
There were two problems in your map/reduce. One is brandCount in reduce rather than typeCount. But more importantly, you are trying to emit once per document, when you need to emit once per department array element. Corrected (and slightly simplified) code:
fnMap = function () {
    this.departments.forEach(function (d) {
        emit(d.departmentType, 1);
    });
};

fnReduce = function (key, values) {
    var result = 0;
    values.forEach(function (value) {
        result += value;
    });
    return result;
};
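For illustration, plugging the corrected functions back into the command object from the question (this just reuses the question's own setup; nothing new beyond the wiring):

var command = {
    mapreduce: "clients",
    query: { "departments.departmentType": { $exists: true } },
    map: fnMap.toString(),
    reduce: fnReduce.toString(),
    out: { inline: 1 }
};

mongoose.connection.db.executeDbCommand(command, function (err, dbres) {
    // dbres.documents[0].results should now contain one { _id, value } entry
    // per distinct departmentType, with value holding its count.
});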