Contact
CoCalc Logo Icon
StoreFeaturesDocsShareSupport News AboutSign UpSign In
| Download
Views: 39537
1
###############################################################################
2
#
3
# CoCalc: Collaborative web-based calculation
4
# Copyright (C) 2017, Sagemath Inc.
5
# AGPLv3
6
#
7
###############################################################################
8
9
###
10
Efficient local document-oriented database with complete history
11
recording backed by a backend database.
12
13
- set(obj) -- creates or modifies an object
14
- delete(obj)   -- deletes all objects matching the spec
15
- get(where) -- get immutable list of 0 or more matching objects
16
- get_one(where) -- get one matching object or undefined
17
18
This is the foundation for a distributed synchronized database.
19
20
DO **NOT** store anything that can't be converted from/to pure JSON.
21
In particular, do *NOT* store Date objects -- they will come back as
22
ISO strings and not be parsed. See https://github.com/sagemathinc/cocalc/issues/1771
23
Instead use ms since epoch (or .toISOString()) for dates. Please!!
24
###
25
26
immutable = require('immutable')
27
underscore = require('underscore')
28
29
syncstring = require('./syncstring')
30
31
misc = require('./misc')
32
33
{required, defaults} = misc
34
35
{EventEmitter} = require('events')
36
37
# Well-defined JSON.stringify...
38
json_stable = require('json-stable-stringify')
39
# Canonical string key for an index value: immutable.js Maps are first
# converted to plain JS, then serialized with stable (sorted-key) JSON,
# so equal values always map to the same key.
to_key = (x) ->
    x = x.toJS() if immutable.Map.isMap(x)
    return json_stable(x)
44
# Public constructor: create a new empty DBDoc.
#
# opts:
#   primary_keys : array of field names that together identify a record (required)
#   string_cols  : array of field names whose string values are edited
#                  via string patches (default [])
#
# Throws if primary_keys or string_cols is not an array.
exports.db_doc = (opts) ->
    opts = defaults opts,
        primary_keys : required
        string_cols  : []
    if not misc.is_array(opts.primary_keys)
        throw Error("primary_keys must be an array")
    if not misc.is_array(opts.string_cols)
        # BUG FIX: the option is named 'string_cols' (no leading underscore),
        # so the error message must name it correctly.
        throw Error("string_cols must be an array")
    return new DBDoc(opts.primary_keys, opts.string_cols)
# Build a DBDoc directly from an array of plain Javascript record objects.
exports.from_obj = (opts) ->
    opts = defaults opts,
        obj          : required
        primary_keys : required
        string_cols  : []
    throw Error("obj must be an array") if not misc.is_array(opts.obj)
    # convert the records to immutable data and hand them to the constructor
    return new DBDoc(opts.primary_keys, opts.string_cols, immutable.fromJS(opts.obj))
# Create a DBDoc from its string serialization: one JSON object per line.
# Lines that fail to parse are reported via console.warn and skipped, so a
# partially corrupted document still loads (best-effort by design).
#
# opts:
#   str          : the serialized document (required)
#   primary_keys : array of primary key field names (required)
#   string_cols  : array of string-patched field names (default [])
exports.from_str = (opts) ->
    opts = defaults opts,
        str          : required
        primary_keys : required
        string_cols  : []
    if not misc.is_string(opts.str)
        # BUG FIX: the option being validated here is 'str', not 'obj'.
        throw Error("str must be a string")
    obj = []
    for line in opts.str.split('\n')
        if line.length > 0
            try
                obj.push(misc.from_json(line))
            catch e
                console.warn("CORRUPT db-doc string: #{e} -- skipping '#{line}'")
    return exports.from_obj(obj:obj, primary_keys:opts.primary_keys, string_cols:opts.string_cols)
# Shallow-merge the immutable.Map `change` into the immutable.Map `obj`:
# a key whose value is null or undefined is *deleted* from obj; every
# other key is set to its value in change.  Returns the merged map.
merge_set = (obj, change) ->
    change.forEach (v, k) ->
        obj = if not v? then obj.delete(k) else obj.set(k, v)
        return
    return obj
# Compute a change object such that merge_set(obj1, change) produces obj2.
# For each key:
#   - equal values in obj1 and obj2 contribute nothing,
#   - keys present only in obj1 map to null (deletion),
#   - keys that differ, or exist only in obj2, map to their obj2 value.
map_merge_patch = (obj1, obj2) ->
    patch = {}
    for key, v1 of obj1
        v2 = obj2[key]
        continue if underscore.isEqual(v1, v2)   # unchanged -- skip
        patch[key] = if not v2? then null else v2
    # keys that only appear in obj2
    for key, v2 of obj2 when not obj1[key]?
        patch[key] = v2
    return patch
# Restrict immutable map f to entries whose value is not exactly null
# (null marks a column for deletion; only null is dropped here).
nonnull_cols = (f) ->
    return f.filter (v) -> v isnt null
# Immutable in-memory document database.
#
# A DBDoc is a persistent (functional) data structure: set and delete return
# a *new* DBDoc that shares structure with this one; an instance is never
# mutated after construction.  Records live in a sparse immutable.List
# (deleted slots become undefined), and for each primary key field there is
# an index mapping to_key(value) --> immutable.Set of record positions.
# Each DBDoc also tracks which primary keys changed since change tracking
# last started (see reset_changes/changes).
class DBDoc
    constructor: (@_primary_keys, @_string_cols, @_records, @_everything, @_indexes, @_changes) ->
        @_primary_keys = @_process_cols(@_primary_keys)
        @_string_cols = @_process_cols(@_string_cols)
        # list of records -- each is assumed to be an immutable.Map.
        @_records ?= immutable.List()
        # sorted set of i such that @_records.get(i) is defined.
        @_everything ?= immutable.Set((n for n in [0...@_records.size] when @_records.get(n)?)).sort()
        if not @_indexes?
            # Build indexes from scratch: for each primary key field, map
            # to_key(value) --> sorted immutable.Set of record positions.
            @_indexes = immutable.Map() # from field to Map
            for field of @_primary_keys
                @_indexes = @_indexes.set(field, immutable.Map())
            n = 0   # NOTE(review): unused -- shadowed by the map callback argument below
            @_records.map (record, n) =>
                @_indexes.map (index, field) =>
                    val = record.get(field)
                    if val?
                        k = to_key(val)
                        matches = index.get(k)
                        if matches?
                            matches = matches.add(n).sort()
                        else
                            matches = immutable.Set([n])
                        @_indexes = @_indexes.set(field, index.set(k, matches))
                    return
                return
        # number of defined (non-deleted) records
        @size = @_everything.size
        if not @_changes?
            @reset_changes()

    # Restart change tracking: no tracked changes, relative to this db.
    reset_changes: =>
        @_changes = {changes: immutable.Set(), from_db:@}

    # Returns object {changes: an immutable set of primary keys, from_db: db object where change tracking started}
    changes: =>
        return @_changes

    # Given an immutable map f, return its restriction to the primary keys
    _primary_key_cols: (f) =>
        return f.filter((v, k) => @_primary_keys[k])

    # Normalize a column specification: an array of field names becomes a
    # map {field: true}; a map is passed through unchanged.
    # Throws unless the input is an array or an object.
    _process_cols: (v) =>
        if misc.is_array(v)
            p = {}
            for field in v
                p[field] = true
            return p
        else if not misc.is_object(v)
            throw Error("primary_keys must be a map or array")
        return v

    # Return the immutable.Set of positions in @_records whose records
    # satisfy the where condition; every field of `where` must be a
    # primary key.  An empty where matches everything.
    _select: (where) =>
        if immutable.Map.isMap(where)
            where = where.toJS()
        # Return immutable set with defined indexes the elts of @_records that
        # satisfy the where condition.
        len = misc.len(where)
        result = undefined
        for field, value of where
            index = @_indexes.get(field)
            if not index?
                throw Error("field '#{field}' must be a primary key")
            # v is an immutable.js set or undefined
            v = index.get(to_key(value)) # v may be undefined here, so important to do the v? check first!
            if not v?
                return immutable.Set() # no matches for this field - done
            if len == 1
                # no need to do further intersection
                return v
            if result?
                # intersect with what we've found so far via indexes.
                result = result.intersect(v)
            else
                result = v
        if not result?
            # where condition must have been empty -- matches everything
            return @_everything
        else
            return result

    # Used internally for determining the set/where parts of an object.
    # Primary key fields with defined values go into `where`; all other
    # fields (null/undefined primary keys are silently dropped) go into `set`.
    _parse: (obj) =>
        if immutable.Map.isMap(obj) # it is very clean/convenient to allow this
            obj = obj.toJS()
        if not misc.is_object(obj)
            throw Error("obj must be a Javascript object")
        where = {}
        set = {}
        for field, val of obj
            if @_primary_keys[field]?
                if val?
                    where[field] = val
            else
                set[field] = val
        return {where:where, set:set, obj:obj} # return obj, in case had to convert from immutable

    # Create or modify a record; returns the resulting DBDoc (this one is
    # unchanged).  If obj is an array, set each element in turn.  If a
    # record matching obj's primary key part exists, the *first* match is
    # edited: null values delete fields, array values on declared
    # string_cols are applied as string patches, and map values are
    # shallow-merged via merge_set.  Otherwise a new record is appended
    # and the indexes are updated.
    set: (obj) =>
        if misc.is_array(obj)
            z = @
            for x in obj
                z = z.set(x)
            return z
        {where, set, obj} = @_parse(obj)
        # console.log("set #{misc.to_json(set)}, #{misc.to_json(where)}")
        matches = @_select(where)
        {changes} = @_changes
        n = matches?.first()
        # TODO: very natural optimization would be be to fully support and use obj being immutable
        if n?
            # edit the first existing record that matches
            before = record = @_records.get(n)
            for field, value of set
                if value == null # null = how to delete fields
                    record = record.delete(field)
                else
                    if @_string_cols[field] and misc.is_array(value)
                        # special case: a string patch
                        record = record.set(field, syncstring.apply_patch(value, before.get(field) ? '')[0])
                    else
                        cur = record.get(field)
                        change = immutable.fromJS(value)
                        if immutable.Map.isMap(cur) and immutable.Map.isMap(change)
                            new_val = merge_set(cur, change)
                        else
                            new_val = change
                        record = record.set(field, new_val)

            if not before.equals(record)
                # there was an actual change, so update; doesn't change anything involving indexes.
                changes = changes.add(@_primary_key_cols(record))
                return new DBDoc(@_primary_keys, @_string_cols, @_records.set(n, record), @_everything, @_indexes, {changes:changes, from_db:@_changes.from_db})
            else
                return @
        else
            # The sparse array matches had nothing in it, so append a new record.
            for field of @_string_cols
                if obj[field]? and misc.is_array(obj[field])
                    # it's a patch -- but there is nothing to patch, so discard this field
                    obj = misc.copy_without(obj, field)
            record = nonnull_cols(immutable.fromJS(obj)) # remove null columns (indicate delete)
            changes = changes.add(@_primary_key_cols(record))
            records = @_records.push(record)
            n = records.size - 1
            everything = @_everything.add(n)
            # update indexes
            indexes = @_indexes
            for field of @_primary_keys
                val = obj[field]
                # NOTE(review): 'val != null' is redundant given 'val?' (kept as-is)
                if val? and val != null
                    index = indexes.get(field) ? immutable.Map()
                    k = to_key(val)
                    matches = index.get(k)
                    if matches?
                        matches = matches.add(n).sort()
                    else
                        matches = immutable.Set([n])
                    indexes = indexes.set(field, index.set(k, matches))
            return new DBDoc(@_primary_keys, @_string_cols, records, everything, indexes, {changes:changes, from_db:@_changes.from_db})

    # Delete all records matching where; returns the resulting DBDoc.
    # If where is an array, delete each element's matches in turn.
    # If where is undefined, everything is deleted.
    delete: (where) =>
        if misc.is_array(where)
            z = @
            for x in where
                z = z.delete(x)
            return z
        # console.log("delete #{misc.to_json(where)}")
        # if where undefined, will delete everything
        if @_everything.size == 0
            # no-op -- no data so deleting is trivial
            return @
        {changes} = @_changes
        remove = @_select(where)
        if remove.size == @_everything.size
            # actually deleting everything; easy special cases
            changes = changes.union(@_records.filter((record)=>record?).map(@_primary_key_cols))
            return new DBDoc(@_primary_keys, @_string_cols, undefined, undefined, undefined, {changes:changes, from_db:@_changes.from_db})

        # remove matches from every index
        indexes = @_indexes
        for field of @_primary_keys
            index = indexes.get(field)
            if not index?
                continue
            remove.map (n) =>
                record = @_records.get(n)
                val = record.get(field)
                if val?
                    k = to_key(val)
                    matches = index.get(k).delete(n)
                    if matches.size == 0
                        index = index.delete(k)
                    else
                        index = index.set(k, matches)
                    indexes = indexes.set(field, index)
                return

        # delete corresponding records (actually set to undefined, so
        # positions of remaining records -- and hence the indexes -- stay valid)
        records = @_records
        remove.map (n) =>
            changes = changes.add(@_primary_key_cols(records.get(n)))
            records = records.set(n, undefined)

        everything = @_everything.subtract(remove)

        return new DBDoc(@_primary_keys, @_string_cols, records, everything, indexes, {changes:changes, from_db:@_changes.from_db})

    # Returns immutable list of all matches
    get: (where) =>
        matches = @_select(where)
        if not matches?
            return immutable.List()
        return @_records.filter((x,n)->matches.includes(n))

    # Returns the first match, or undefined if there are no matches
    get_one: (where) =>
        matches = @_select(where)
        if not matches?
            return
        return @_records.get(matches.first())

    # True if both databases contain exactly the same set of records
    # (record order is ignored; the undefined "deleted" slots are
    # neutralized by adding undefined to both sides).
    equals: (other) =>
        if @_records == other._records
            return true
        if @size != other.size
            return false
        return immutable.Set(@_records).add(undefined).equals(immutable.Set(other._records).add(undefined))

    # Conversion to and from an array of records, which is the primary key list followed by the normal Javascript objects
    to_obj: =>
        return @get().toJS()

    # Serialize as one JSON object per line (inverse of exports.from_str).
    to_str: =>
        if @_to_str_cache? # save to cache since this is an immutable object
            return @_to_str_cache
        return @_to_str_cache = (misc.to_json(x) for x in @to_obj()).join('\n')

    # x = javascript object; return only its primary key fields.
    _primary_key_part: (x) =>
        where = {}
        for k, v of x
            if @_primary_keys[k]
                where[k] = v
        return where

    # Compute a patch (consumable by apply_patch) that transforms this
    # db into `other`.  The patch is a flat array of (op, args) pairs,
    # where op -1 means delete-matching and op 1 means set.
    make_patch: (other) =>
        if other.size == 0
            # Special case -- delete everything
            return [-1,[{}]]

        t0 = immutable.Set(@_records)
        t1 = immutable.Set(other._records)
        # Remove the common intersection -- nothing going on there.
        # Doing this greatly reduces the complexity in the common case in which little has changed
        common = t0.intersect(t1).add(undefined)
        t0 = t0.subtract(common)
        t1 = t1.subtract(common)

        # Easy very common special cases
        if t0.size == 0
            # Special case: t0 is empty -- insert all the records.
            return [1, t1.toJS()]
        if t1.size == 0
            # Special case: t1 is empty -- bunch of deletes
            v = []
            t0.map (x) =>
                v.push(@_primary_key_part(x.toJS()))
                return
            return [-1, v]

        # compute the key parts of t0 and t1 as sets
        # means -- set got from t0 by taking only the primary_key columns
        k0 = t0.map(@_primary_key_cols)
        k1 = t1.map(@_primary_key_cols)

        add = []
        remove = undefined

        # Deletes: everything in k0 that is not in k1
        deletes = k0.subtract(k1)
        if deletes.size > 0
            remove = deletes.toJS()

        # Inserts: everything in k1 that is not in k0
        inserts = k1.subtract(k0)
        if inserts.size > 0
            inserts.map (k) =>
                add.push(other.get_one(k.toJS()).toJS())
                return

        # Everything in k1 that is also in k0 -- these must have all changed
        changed = k1.intersect(k0)
        if changed.size > 0
            changed.map (k) =>
                obj = k.toJS()
                obj0 = @_primary_key_part(obj)
                from = @get_one(obj0).toJS()
                to = other.get_one(obj0).toJS()
                # undefined for each key of from not in to
                for k of from
                    if not to[k]?
                        obj[k] = null
                # explicitly set each key of to that is different than corresponding key of from
                for k, v of to
                    if not underscore.isEqual(from[k], v)
                        if @_string_cols[k] and from[k]? and v?
                            # A string patch
                            obj[k] = syncstring.make_patch(from[k], v)
                        else if misc.is_object(from[k]) and misc.is_object(v)
                            # Changing from one map to another, where they are not equal -- can use
                            # a merge to make this more efficient. This is an important optimization,
                            # to avoid making patches HUGE.
                            obj[k] = map_merge_patch(from[k], v)
                        else
                            obj[k] = v
                add.push(obj)
                return

        patch = []
        if remove?
            patch.push(-1)
            patch.push(remove)
        if add.length > 0
            patch.push(1)
            patch.push(add)

        return patch

    # Apply a patch produced by make_patch; returns the patched DBDoc.
    apply_patch: (patch) =>
        i = 0
        db = @
        while i < patch.length
            if patch[i] == -1
                db = db.delete(patch[i+1])
            else if patch[i] == 1
                db = db.set(patch[i+1])
            i += 2
        return db

    # Return immutable set of primary keys of records that change in going from @ to other.
    changed_keys: (other) =>
        if @_records == other?._records # identical
            return immutable.Set()
        t0 = immutable.Set(@_records).filter((x) -> x?) # defined records
        if not other?
            return t0.map(@_primary_key_cols)

        t1 = immutable.Set(other._records).filter((x) -> x?)

        # Remove the common intersection -- nothing going on there.
        # Doing this greatly reduces the complexity in the common case in which little has changed
        common = t0.intersect(t1)
        t0 = t0.subtract(common)
        t1 = t1.subtract(common)

        # compute the key parts of t0 and t1 as sets
        k0 = t0.map(@_primary_key_cols)
        k1 = t1.map(@_primary_key_cols)
        return k0.union(k1)
# Thin immutable wrapper around a DBDoc, exposing the document interface
# (to_str / is_equal / apply_patch / make_patch / change tracking) that
# the synchronized-document machinery expects.  All real work is
# delegated to the underlying DBDoc in @_db.
class Doc
    constructor: (@_db) ->
        throw Error("@_db must be defined") if not @_db?

    to_str: =>
        @_db.to_str()

    is_equal: (other) =>
        @_db.equals(other._db)

    apply_patch: (patch) =>
        new Doc(@_db.apply_patch(patch))

    make_patch: (other) =>
        # not initialized or closed, etc. -- undefined means done.
        return if not @_db? or not other?._db?
        @_db.make_patch(other._db)

    changes: =>
        @_db.changes()

    reset_changes: =>
        @_db.reset_changes()
        return

    get: (where) =>
        @_db?.get(where)

    get_one: (where) =>
        @_db?.get_one(where)
# SyncDoc specialization for db documents: wires a db-doc deserializer
# (from_str) and the 'db' doctype (with its primary_keys/string_cols
# options) into the generic syncstring.SyncDoc machinery.
class SyncDoc extends syncstring.SyncDoc
    constructor: (opts) ->
        opts = defaults opts,
            client            : required
            project_id        : undefined
            path              : undefined
            save_interval     : undefined
            patch_interval    : undefined
            file_use_interval : undefined
            cursors           : false
            primary_keys      : required
            string_cols       : []

        # Deserialize a one-JSON-object-per-line string into a Doc wrapping a DBDoc.
        from_str = (str) ->
            db = exports.from_str
                str          : str
                primary_keys : opts.primary_keys
                string_cols  : opts.string_cols
            return new Doc(db)

        # NOTE(review): 'id' is not declared in the defaults spec above, so
        # opts.id is presumably always undefined here -- confirm against the
        # behavior of misc.defaults and the syncstring.SyncDoc constructor.
        super
            string_id         : opts.id
            client            : opts.client
            project_id        : opts.project_id
            path              : opts.path
            save_interval     : opts.save_interval
            patch_interval    : opts.patch_interval
            file_use_interval : opts.file_use_interval
            cursors           : opts.cursors
            from_str          : from_str
            doctype           :
                type         : 'db'
                patch_format : 1
                opts         :
                    primary_keys : opts.primary_keys
                    string_cols  : opts.string_cols
# TODO: obviously I should rewrite this so SyncDB just derives from SyncDoc. I didn't realize
# otherwise I would have to proxy all the methods.
#
# Event-emitting facade over a SyncDoc whose document type is a db-doc.
# Most methods simply check that the underlying doc is still open and
# delegate to it; set/delete additionally rewrap the resulting DBDoc in
# a new Doc and trigger change notification.
class exports.SyncDB extends EventEmitter
    constructor: (opts) ->
        @_path = opts.path
        if opts.change_throttle
            # console.log("throttling on_change #{opts.throttle}")
            @_on_change = underscore.throttle(@_on_change, opts.change_throttle)
        delete opts.change_throttle
        @_doc = new SyncDoc(opts)
        # Ensure that we always emit first change event, even if it is [] (in case of empty syncdb);
        # clients depend on this to know when the syncdb has been properly loaded.
        @_first_change_event = true
        # Re-emit the underlying doc's events from this object.
        @_doc.on('change', @_on_change)
        @_doc.on('metadata-change', => @emit('metadata-change'))
        @_doc.on('before-change', => @emit('before-change'))
        @_doc.on('sync', => @emit('sync'))
        if opts.cursors
            @_doc.on('cursor_activity', (args...) => @emit('cursor_activity', args...))
        @_doc.on('connected', => @emit('connected'))
        @_doc.on('init', (err) => @emit('init', err))
        @_doc.on('save_to_disk_project', (err) => @emit('save_to_disk_project', err)) # only emitted on the backend/project!
        @setMaxListeners(100)

    # Throw if this SyncDB has already been closed.
    _check: =>
        if not @_doc?
            throw Error("SyncDB('#{@_path}') is closed")

    has_unsaved_changes: =>
        @_check()
        return @_doc.has_unsaved_changes()

    has_uncommitted_changes: =>
        @_check()
        return @_doc.has_uncommitted_changes()

    is_read_only: =>
        @_check()
        return @_doc.get_read_only()

    # Compute the set of changed primary keys and emit a 'change' event.
    # Uses the cheap tracked-changes path when change tracking started from
    # the last db we saw; otherwise falls back to a full diff.
    _on_change: =>
        if not @_doc?
            # This **can** happen because @_on_change is actually throttled, so
            # definitely will sometimes get called one more time,
            # even after this object is closed. (see the constructor above).
            # Not rebroadcasting such change events is fine, since the object
            # is already closed and nobody is listening.
            # See https://github.com/sagemathinc/cocalc/issues/1829
            return
        db = @_doc.get_doc()._db
        if not @_last_db?
            # first time ever -- just get all keys
            changes = db.changed_keys()
        else
            # may be able to use tracked changes...
            {changes, from_db} = @_doc.get_doc().changes()
            @_doc.get_doc().reset_changes()
            if from_db != @_last_db
                # NOPE: have to compute the hard (but rock solid and accurate) way.
                changes = db.changed_keys(@_last_db)

        if changes.size > 0 or @_first_change_event # something actually probably changed
            @emit('change', changes)
        @_last_db = db
        delete @_first_change_event

    # Close the underlying doc and detach all listeners; idempotent.
    close: () =>
        if not @_doc?
            return
        @removeAllListeners()
        @_doc?.close()
        delete @_doc

    is_closed: =>
        return not @_doc?

    # Save to the backend database (not to disk).
    sync: (cb) =>
        @_check()
        @_doc.save(cb)
        return

    # Save the document to disk.
    save: (cb) =>
        @_check()
        @_doc.save_to_disk(cb)
        return

    save_asap: (cb) =>
        @_check()
        @_doc.save_asap(cb)
        return

    set_doc: (value) =>
        @_check()
        @_doc.set_doc(value)
        return

    get_doc: () =>
        @_check()
        return @_doc.get_doc()

    get_path: =>
        @_check()
        return @_doc.get_path()

    get_project_id: =>
        return @_doc.get_project_id()

    # change (or create) exactly *one* database entry that matches
    # the given where criterion.
    set: (obj, save=true) =>
        if not @_doc?
            return
        @_doc.set_doc(new Doc(@_doc.get_doc()._db.set(obj)))
        if save
            @_doc.save()
        @_on_change()
        return

    # Get all matching records; if time is given, query that historical
    # version (returns undefined if that version is not available).
    get: (where, time) =>
        if not @_doc?
            return immutable.List()
        if time?
            d = @_doc.version(time)
        else
            d = @_doc.get_doc()
        if not d?
            return
        return d._db.get(where)

    get_one: (where, time) =>
        if not @_doc?
            return
        if time?
            d = @_doc.version(time)
        else
            d = @_doc.get_doc()
        if not d?
            return
        return d._db.get_one(where)

    # delete everything that matches the given criterion
    # NOTE(review): an earlier comment claimed this returns the number of
    # deleted items; it actually returns undefined.
    delete: (where, save=true) =>
        if not @_doc?
            return
        d = @_doc.get_doc()
        if not d?
            return
        @_doc.set_doc(new Doc(d._db.delete(where)))
        if save
            @_doc.save()
        @_on_change()
        return

    versions: =>
        @_check()
        return @_doc.versions()

    last_changed: =>
        @_check()
        return @_doc.last_changed()

    all_versions: =>
        @_check()
        return @_doc.all_versions()

    version: (t) =>
        @_check()
        return @_doc.version(t)

    account_id: (t) =>
        @_check()
        return @_doc.account_id(t)

    time_sent: (t) =>
        @_check()
        return @_doc.time_sent(t)

    show_history: (opts) =>
        @_check()
        return @_doc.show_history(opts)

    has_full_history: =>
        @_check()
        return @_doc.has_full_history()

    load_full_history: (cb) =>
        @_check()
        @_doc.load_full_history(cb)

    wait_until_read_only_known: (cb) =>
        @_check()
        return @_doc.wait_until_read_only_known(cb)

    get_read_only: =>
        @_check()
        return @_doc.get_read_only()

    count: =>
        @_check()
        return @_doc.get_doc()._db.size

    # Undo one step, save, and rebroadcast the change.
    undo: =>
        @_check()
        @_doc.set_doc(@_doc.undo())
        @_doc.save()
        @_on_change()
        return

    # Redo one step, save, and rebroadcast the change.
    redo: =>
        @_check()
        @_doc.set_doc(@_doc.redo())
        @_doc.save()
        @_on_change()
        return

    exit_undo_mode: =>
        @_check()
        @_doc.exit_undo_mode()

    in_undo_mode: =>
        @_check()
        return @_doc.in_undo_mode()

    # Revert to the given historical version and save.
    revert: (version) =>
        @_check()
        @_doc.revert(version)
        @_doc.save()
        return

    set_cursor_locs: (locs) =>
        @_check()
        @_doc.set_cursor_locs(locs)
        return

    get_cursors: =>
        return @_doc?.get_cursors()
# Open an existing sync document -- returns instance of SyncString or SyncDB, depending
# on what is already in the database. Error if file doesn't exist.
#
# Queries the syncstrings table for the document's stored doctype, then
# dispatches to the matching client factory (client.sync_string /
# client.sync_db) with the doctype's stored options merged in.
# Results are delivered via opts.cb(err) or opts.cb(undefined, doc).
exports.open_existing_sync_document = (opts) ->
    opts = defaults opts,
        client     : required
        project_id : required
        path       : required
        cb         : required
    opts.client.query
        query :
            syncstrings:
                project_id : opts.project_id
                path       : opts.path
                doctype    : null
        cb: (err, resp) ->
            if err
                opts.cb(err)
                return
            if resp.event == 'error'
                opts.cb(resp.error)
                return
            if not resp.query?.syncstrings?
                opts.cb("no document '#{opts.path}' in project '#{opts.project_id}'")
                return
            # documents created before doctype existed default to plain strings
            doctype = JSON.parse(resp.query.syncstrings.doctype ? '{"type":"string"}')
            opts2 =
                project_id : opts.project_id
                path       : opts.path
            if doctype.opts?
                opts2 = misc.merge(opts2, doctype.opts)
            # e.g. doctype.type == 'db' --> opts.client.sync_db(opts2)
            doc = opts.client["sync_#{doctype.type}"](opts2)
            opts.cb(undefined, doc)
823
824
825
826
827