1
###
2
User (and project) client queries
3
4
COPYRIGHT : (c) 2017 SageMath, Inc.
5
LICENSE : AGPLv3
6
###
7
8
MAX_CHANGEFEEDS_PER_CLIENT = 2000
9
10
# Reject any patch whose timestamp is more than 3 minutes in the future.
11
MAX_PATCH_FUTURE_MS = 1000*60*3
12
13
EventEmitter = require('events')
14
async = require('async')
15
underscore = require('underscore')
16
17
{PostgreSQL, one_result, all_results, count_result, pg_type} = require('./postgres')
18
{quote_field} = require('./postgres-base')
19
20
{defaults} = misc = require('smc-util/misc')
21
required = defaults.required
22
23
{PROJECT_UPGRADES, SCHEMA} = require('smc-util/schema')
24
25
class exports.PostgreSQL extends PostgreSQL
26
27
user_query: (opts) =>
28
opts = defaults opts,
29
account_id : undefined
30
project_id : undefined
31
query : required
32
options : [] # used for initial query; **IGNORED** by changefeed!;
33
# - Use [{set:true}] or [{set:false}] to force get or set query
34
# - For a set query, use {delete:true} to delete instead of set. This is the only way
35
# to delete a record, and won't work unless delete:true is set in the schema
36
# for the table to explicitly allow deleting.
37
changes : undefined # id of change feed
38
cb : undefined # cb(err, result) # WARNING -- this *will* get called multiple times when changes is true!
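# Illustrative sketch (not part of the original source): how a caller might invoke
# user_query.  The table and field names below are assumptions for the example only.
# A query with null leaves is treated as a get; a query with only concrete values is a set.
#
#   db.user_query
#       account_id : account_id
#       query      : {accounts : {account_id : null, email_address : null}}   # get -- null leaves
#       cb         : (err, result) -> console.log(err, result)
#
#   db.user_query
#       account_id : account_id
#       query      : {accounts : {account_id : account_id, font_size : 14}}   # set -- concrete values
#       cb         : (err) -> console.log(err)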
39
dbg = @_dbg("user_query(...)")
40
if misc.is_array(opts.query)
41
@_user_query_array(opts)
42
return
43
44
subs =
45
'{account_id}' : opts.account_id
46
'{project_id}' : opts.project_id
47
'{now}' : new Date()
48
49
if opts.changes?
50
changes =
51
id : opts.changes
52
cb : opts.cb
53
54
v = misc.keys(opts.query)
55
if v.length > 1
56
opts.cb?('must specify exactly one key in the query')
57
return
58
table = v[0]
59
query = opts.query[table]
60
if misc.is_array(query)
61
if query.length > 1
62
opts.cb?("array of length > 1 not yet implemented")
63
return
64
multi = true
65
query = query[0]
66
else
67
multi = false
68
is_set_query = undefined
69
if opts.options?
70
if not misc.is_array(opts.options)
71
opts.cb?("options (=#{misc.to_json(opts.options)}) must be an array")
72
return
73
for x in opts.options
74
if x.set?
75
is_set_query = !!x.set
76
options = (x for x in opts.options when not x.set?)
77
else
78
options = []
79
80
if misc.is_object(query)
81
query = misc.deep_copy(query)
82
misc.obj_key_subs(query, subs)
83
if not is_set_query?
84
is_set_query = not misc.has_null_leaf(query)
85
if is_set_query
86
# do a set query
87
if changes
88
opts.cb?("changefeeds only for read queries")
89
return
90
if not opts.account_id? and not opts.project_id?
91
opts.cb?("no anonymous set queries")
92
return
93
@user_set_query
94
account_id : opts.account_id
95
project_id : opts.project_id
96
table : table
97
query : query
98
options : opts.options
99
cb : (err, x) =>
100
opts.cb?(err, {"#{table}":x})
101
else
102
# do a get query
103
if changes and not multi
104
opts.cb?("changefeeds only implemented for multi-document queries")
105
return
106
107
if changes
108
try
109
@_inc_changefeed_count(opts.account_id, opts.project_id, table, changes.id)
110
catch err
111
opts.cb?(err)
112
return
113
114
@user_get_query
115
account_id : opts.account_id
116
project_id : opts.project_id
117
table : table
118
query : query
119
options : options
120
multi : multi
121
changes : changes
122
cb : (err, x) =>
123
if err and changes
124
# didn't actually make the changefeed, so don't count it.
125
@_dec_changefeed_count(changes.id, table)
126
opts.cb?(err, if not err then {"#{table}":x})
127
else
128
opts.cb?("invalid user_query of '#{table}' -- query must be an object")
129
130
###
131
TRACK CHANGEFEED COUNTS
132
###
133
134
# Increment a count of the number of changefeeds by a given client so we can cap it.
135
_inc_changefeed_count: (account_id, project_id, table, changefeed_id) =>
136
client_name = "#{account_id}-#{project_id}"
137
cnt = @_user_get_changefeed_counts ?= {}
138
ids = @_user_get_changefeed_id_to_user ?= {}
139
if not cnt[client_name]?
140
cnt[client_name] = 1
141
else if cnt[client_name] >= MAX_CHANGEFEEDS_PER_CLIENT
142
throw Error("user may create at most #{MAX_CHANGEFEEDS_PER_CLIENT} changefeeds; please close files, refresh browser, restart project")
143
else
144
# increment before successfully making get_query to prevent huge bursts causing trouble!
145
cnt[client_name] += 1
146
@_dbg("_inc_changefeed_count(table='#{table}')")("{#{client_name}:#{cnt[client_name]} ...}")
147
ids[changefeed_id] = client_name
148
149
# Corresponding decrement of the count of changefeeds for a given client.
150
_dec_changefeed_count: (id, table) =>
151
client_name = @_user_get_changefeed_id_to_user[id]
152
if client_name?
153
@_user_get_changefeed_counts?[client_name] -= 1
154
delete @_user_get_changefeed_id_to_user[id]
155
cnt = @_user_get_changefeed_counts
156
if table?
157
t = "(table='#{table}')"
158
else
159
t = ""
160
@_dbg("_dec_changefeed_count#{t}")("counts={#{client_name}:#{cnt[client_name]} ...}")
161
162
# Handle user_query when opts.query is an array. opts below are as for user_query.
163
_user_query_array: (opts) =>
164
if opts.changes and opts.query.length > 1
165
opts.cb("changefeeds only implemented for single table")
166
return
167
result = []
168
f = (query, cb) =>
169
@user_query
170
account_id : opts.account_id
171
project_id : opts.project_id
172
query : query
173
options : opts.options
174
cb : (err, x) =>
175
result.push(x); cb(err)
176
async.mapSeries(opts.query, f, (err) => opts.cb(err, result))
177
178
user_query_cancel_changefeed: (opts) =>
179
opts = defaults opts,
180
id : required
181
cb : undefined # not really asynchronous
182
dbg = @_dbg("user_query_cancel_changefeed(id='#{opts.id}')")
183
feed = @_changefeeds?[opts.id]
184
if feed?
185
dbg("actually cancelling feed")
186
@_dec_changefeed_count(opts.id)
187
delete @_changefeeds[opts.id]
188
feed.close()
189
else
190
dbg("already cancelled before (no such feed)")
191
opts.cb?()
192
193
_query_is_cmp: (obj) =>
194
if not misc.is_object(obj)
195
return false
196
for k, _ of obj
197
if k not in misc.operators
198
return false
199
return k
200
return false
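# Illustrative examples (assuming misc.operators includes the usual comparison
# operators such as '==', '!=', '>=', '<=', '>', '<'):
#
#   @_query_is_cmp({'>=' : 5})        # returns '>='
#   @_query_is_cmp({account_id : 5})  # returns false (key is not an operator)
#   @_query_is_cmp(5)                 # returns false (not an object)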
201
202
_user_get_query_columns: (query) =>
203
return misc.keys(query)
204
205
_require_is_admin: (account_id, cb) =>
206
if not account_id?
207
cb("user must be an admin")
208
return
209
@is_admin
210
account_id : account_id
211
cb : (err, is_admin) =>
212
if err
213
cb(err)
214
else if not is_admin
215
cb("user must be an admin")
216
else
217
cb()
218
219
# Ensure that each project_id in project_ids is such that the account is in one of the given
220
# groups for the project, or that the account is an admin. If not, cb(err).
221
_require_project_ids_in_groups: (account_id, project_ids, groups, cb) =>
222
s = {"#{account_id}": true}
223
require_admin = false
224
@_query
225
query : "SELECT project_id, users#>'{#{account_id}}' AS user FROM projects"
226
where : "project_id = ANY($)":project_ids
227
cache : true
228
cb : all_results (err, x) =>
229
if err
230
cb(err)
231
else
232
known_project_ids = {} # we use this to ensure that each of the given project_ids exists.
233
for p in x
234
known_project_ids[p.project_id] = true
235
if p.user?.group not in groups
236
require_admin = true
237
# If any of the project_ids don't exist, reject the query.
238
for project_id in project_ids
239
if not known_project_ids[project_id]
240
cb("unknown project_id '#{misc.trunc(project_id,100)}'")
241
return
242
if require_admin
243
@_require_is_admin(account_id, cb)
244
else
245
cb()
246
247
_query_parse_options: (options) =>
248
r = {}
249
for x in options
250
for name, value of x
251
switch name
252
when 'limit'
253
r.limit = value
254
when 'slice'
255
r.slice = value
256
when 'order_by'
257
if value[0] == '-'
258
value = value.slice(1) + " DESC "
259
r.order_by = value
260
when 'delete'
261
null
262
# ignore delete here -- it is handled elsewhere
263
when 'heartbeat'
264
@_dbg("_query_parse_options")("TODO/WARNING -- ignoring heartbeat option from old client")
265
else
266
r.err = "unknown option '#{name}'"
267
return r
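# Illustrative example (the field name is an assumption, not from the original source)
# of how a client options array is parsed; a leading '-' in order_by means descending:
#
#   @_query_parse_options([{limit:10}, {order_by:'-last_edited'}])
#   # --> {limit: 10, order_by: 'last_edited DESC '}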
268
269
###
270
SET QUERIES
271
###
272
_parse_set_query_opts: (opts) =>
273
r = {}
274
275
if opts.project_id?
276
dbg = r.dbg = @_dbg("user_set_query(project_id='#{opts.project_id}', table='#{opts.table}')")
277
else if opts.account_id?
278
dbg = r.dbg = @_dbg("user_set_query(account_id='#{opts.account_id}', table='#{opts.table}')")
279
else
280
return {err:"account_id or project_id must be specified"}
281
282
if not SCHEMA[opts.table]?
283
return {err:"table '#{opts.table}' does not exist"}
284
285
dbg(misc.to_json(opts.query))
286
287
if opts.options
288
dbg("options=#{misc.to_json(opts.options)}")
289
290
r.query = misc.copy(opts.query)
291
r.table = opts.table
292
r.db_table = SCHEMA[opts.table].virtual ? opts.table
293
r.account_id = opts.account_id
294
r.project_id = opts.project_id
295
296
s = SCHEMA[opts.table]
297
298
if opts.account_id?
299
r.client_query = s?.user_query
300
else
301
r.client_query = s?.project_query
302
303
if not r.client_query?.set?.fields?
304
return {err:"user set queries not allowed for table '#{opts.table}'"}
305
306
if not @_mod_fields(opts.query, r.client_query)
307
dbg("shortcut -- no fields will be modified, so nothing to do")
308
return
309
310
for field in misc.keys(r.client_query.set.fields)
311
if r.client_query.set.fields[field] == undefined
312
return {err: "user set query not allowed for #{opts.table}.#{field}"}
313
val = r.client_query.set.fields[field]
314
if typeof(val) == 'function'
315
try
316
r.query[field] = val(r.query, @)
317
catch err
318
return {err:"error setting '#{field}' -- #{err}"}
319
else
320
switch val
321
when 'account_id'
322
if not r.account_id?
323
return {err: "account_id must be specified"}
324
r.query[field] = r.account_id
325
when 'project_id'
326
if not r.project_id?
327
return {err: "project_id must be specified"}
328
r.query[field] = r.project_id
329
when 'time_id'
330
r.query[field] = uuid.v1()
331
when 'project_write'
332
if not r.query[field]?
333
return {err: "must specify #{opts.table}.#{field}"}
334
r.require_project_ids_write_access = [r.query[field]]
335
when 'project_owner'
336
if not r.query[field]?
337
return {err:"must specify #{opts.table}.#{field}"}
338
r.require_project_ids_owner = [r.query[field]]
339
340
if r.client_query.set.admin
341
r.require_admin = true
342
343
r.primary_keys = @_primary_keys(r.db_table)
344
345
r.json_fields = @_json_fields(r.db_table, r.query)
346
347
for k, v of r.query
348
if k in r.primary_keys
349
continue
350
if r.client_query?.set?.fields?[k] != undefined
351
continue
352
if s.admin_query?.set?.fields?[k] != undefined
353
r.require_admin = true
354
continue
355
return {err: "changing #{r.table}.#{k} not allowed"}
356
357
# HOOKS which allow for running arbitrary code in response to
358
# user set queries. In each case, new_val below is only the part
359
# of the object that the user requested to change.
360
361
# 0. CHECK: Runs before doing any further processing; has callback, so this
362
# provides a generic way to quickly check whether or not this query is allowed
363
# for things that can't be done declaratively. The check_hook can also
364
# mutate the obj (the user query), e.g., to enforce limits on input size.
365
r.check_hook = r.client_query.set.check_hook
366
367
# 1. BEFORE: If before_change is set, it is called with input
368
# (database, old_val, new_val, account_id, cb)
369
# before the actual change to the database is made.
370
r.before_change_hook = r.client_query.set.before_change
371
372
# 2. INSTEAD OF: If instead_of_change is set, then instead_of_change_hook
373
# is called with input
374
# (database, old_val, new_val, account_id, cb)
375
# *instead* of actually doing the update/insert to
376
# the database. This makes it possible to run arbitrary
377
# code whenever the user does a certain type of set query.
378
# Obviously, if that code doesn't set the new_val in the
379
# database, then new_val won't be the new val.
380
r.instead_of_change_hook = r.client_query.set.instead_of_change
381
382
# 3. AFTER: If set, the on_change_hook is called with
383
# (database, old_val, new_val, account_id, cb)
384
# after the change to the database has been made.
385
r.on_change_hook = r.client_query.set.on_change
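# A minimal sketch (an assumption, not the actual schema) of what such a hook could
# look like in SCHEMA.  The before/instead_of/on_change hooks all receive
# (database, old_val, new_val, account_id, cb), while check_hook receives
# (database, query, account_id, project_id, cb):
#
#   SCHEMA.some_table.user_query.set.before_change = (db, old_val, new_val, account_id, cb) ->
#       if new_val.title?.length > 1000
#           cb("title too long")   # reject the change
#       else
#           cb()                   # allow the change to proceed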
386
387
#dbg("on_change_hook=#{on_change_hook?}, #{misc.to_json(misc.keys(client_query.set))}")
388
389
# Set the query options -- order doesn't matter for set queries (unlike for get), so we
390
# just merge the options into a single dictionary.
391
# NOTE: As I write this, there is just one supported option: {delete:true}.
392
r.options = {}
393
if r.client_query.set.options?
394
for x in r.client_query.set.options
395
for y, z of x
396
r.options[y] = z
397
if opts.options?
398
for x in opts.options
399
for y, z of x
400
r.options[y] = z
401
dbg("options = #{misc.to_json(r.options)}")
402
403
if r.options.delete and not r.client_query.set.delete
404
# delete option is set, but deletes aren't explicitly allowed on this table. ERROR.
405
return {err: "delete from #{r.table} not allowed"}
406
407
return r
408
409
_user_set_query_enforce_requirements: (r, cb) =>
410
async.parallel([
411
(cb) =>
412
if r.require_admin
413
@_require_is_admin(r.account_id, cb)
414
else
415
cb()
416
(cb) =>
417
if r.require_project_ids_write_access?
418
if r.project_id?
419
err = undefined
420
for x in r.require_project_ids_write_access
421
if x != r.project_id
422
err = "can only query same project"
423
break
424
cb(err)
425
else
426
@_require_project_ids_in_groups(r.account_id, r.require_project_ids_write_access,\
427
['owner', 'collaborator'], cb)
428
else
429
cb()
430
(cb) =>
431
if r.require_project_ids_owner?
432
@_require_project_ids_in_groups(r.account_id, r.require_project_ids_owner,\
433
['owner'], cb)
434
else
435
cb()
436
], cb)
437
438
_user_set_query_where: (r) =>
439
where = {}
440
for primary_key in @_primary_keys(r.db_table)
441
type = pg_type(SCHEMA[r.db_table].fields[primary_key])
442
value = r.query[primary_key]
443
if type == 'TIMESTAMP' and not misc.is_date(value)
444
# Javascript is better at parsing its own dates than PostgreSQL
445
value = new Date(value)
446
where["#{primary_key}=$::#{type}"] = value
447
return where
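# Illustrative example (field names assumed): for a table whose only primary key is
# project_id of type UUID, a query such as {project_id:'...', title:'x'} produces
#
#   {"project_id=$::UUID" : '...'}
#
# i.e., one parameterized equality clause per primary key.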
448
449
_user_set_query_values: (r) =>
450
values = {}
451
s = SCHEMA[r.db_table]
452
for key, value of r.query
453
type = pg_type(s?.fields?[key])
454
if type?
455
if type == 'TIMESTAMP' and not misc.is_date(value)
456
# (as above) Javascript is better at parsing its own dates than PostgreSQL
457
value = new Date(value)
458
values["#{key}::#{type}"] = value
459
else
460
values[key] = value
461
return values
462
463
_user_set_query_hooks_prepare: (r, cb) =>
464
if r.on_change_hook? or r.before_change_hook? or r.instead_of_change_hook?
465
for primary_key in r.primary_keys
466
if not r.query[primary_key]?
467
cb("query must specify (primary) key '#{primary_key}'")
468
return
469
# get the old value before changing it
470
# TODO: optimization -- can we restrict columns below?
471
@_query
472
query : "SELECT * FROM #{r.db_table}"
473
where : @_user_set_query_where(r)
474
cb : one_result (err, x) =>
475
r.old_val = x; cb(err)
476
else
477
cb()
478
479
_user_query_set_count: (r, cb) =>
480
@_query
481
query : "SELECT COUNT(*) FROM #{r.db_table}"
482
where : @_user_set_query_where(r)
483
cb : count_result(cb)
484
485
_user_query_set_delete: (r, cb) =>
486
@_query
487
query : "DELETE FROM #{r.db_table}"
488
where : @_user_set_query_where(r)
489
cb : cb
490
491
_user_set_query_conflict: (r) =>
492
return r.primary_keys
493
494
_user_query_set_upsert: (r, cb) =>
495
@_query
496
query : "INSERT INTO #{r.db_table}"
497
values : @_user_set_query_values(r)
498
conflict : @_user_set_query_conflict(r)
499
cb : cb
500
501
# Record is already in DB, so we update it:
502
# this function handles a case that involves both
503
# a jsonb_merge and an update.
504
_user_query_set_upsert_and_jsonb_merge: (r, cb) =>
505
jsonb_merge = {}
506
for k of r.json_fields
507
v = r.query[k]
508
if v?
509
jsonb_merge[k] = v
510
set = {}
511
for k, v of r.query
512
if v? and k not in r.primary_keys and not jsonb_merge[k]?
513
set[k] = v
514
@_query
515
query : "UPDATE #{r.db_table}"
516
jsonb_merge : jsonb_merge
517
set : set
518
where : @_user_set_query_where(r)
519
cb : cb
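# Illustrative example (field names assumed): if r.query is
#   {project_id:'...', users:{'<account_id>':{hide:true}}, title:'My project'}
# and r.json_fields = {users:[]}, with project_id the only primary key, then
#   jsonb_merge = {users:{'<account_id>':{hide:true}}}
#   set         = {title:'My project'}
# so users is deep-merged as JSONB while title is set as an ordinary column.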
520
521
_user_set_query_main_query: (r, cb) =>
522
if r.instead_of_change_hook?
523
r.instead_of_change_hook(@, r.old_val, r.query, r.account_id, cb)
524
else if r.options.delete
525
for primary_key in r.primary_keys
526
if not r.query[primary_key]?
527
cb("delete query must set primary key")
528
return
529
r.dbg("delete based on primary key")
530
@_user_query_set_delete(r, cb)
531
else
532
if misc.len(r.json_fields) == 0
533
# easy case -- there are no jsonb merge fields; just do an upsert.
534
@_user_query_set_upsert(r, cb)
535
return
536
# HARD CASE -- there are json_fields... so we are doing an insert
537
# if the object isn't already in the database, and an update
538
# if it is. This is ugly because I don't know how to do both
539
# a JSON merge as an upsert.
540
cnt = undefined # will equal number of records having the primary key (so 0 or 1)
541
async.series([
542
(cb) =>
543
@_user_query_set_count r, (err, n) =>
544
cnt = n; cb(err)
545
(cb) =>
546
if cnt == 0
547
# Just insert (do as upsert to avoid error in case of race)
548
@_user_query_set_upsert(r, cb)
549
else
550
# Do as an update -- record is definitely already in db since cnt > 0.
551
# This would fail in the unlikely (but possible) case that somebody deletes
552
# the record between the above count and when we do the UPDATE.
553
# Using a transaction could avoid this.
554
# Maybe such an error is reasonable and it's good to report it as such.
555
@_user_query_set_upsert_and_jsonb_merge(r, cb)
556
], cb)
557
558
user_set_query: (opts) =>
559
opts = defaults opts,
560
account_id : undefined
561
project_id : undefined
562
table : required
563
query : required
564
options : undefined # {delete:true} is the only supported option
565
cb : required # cb(err)
566
r = @_parse_set_query_opts(opts)
567
#r.dbg("parsed query opts = #{misc.to_json(r)}")
568
if not r? # nothing to do
569
opts.cb()
570
return
571
if r.err
572
opts.cb(r.err)
573
return
574
575
async.series([
576
(cb) =>
577
@_user_set_query_enforce_requirements(r, cb)
578
(cb) =>
579
if r.check_hook?
580
r.check_hook(@, r.query, r.account_id, r.project_id, cb)
581
else
582
cb()
583
(cb) =>
584
@_user_set_query_hooks_prepare(r, cb)
585
(cb) =>
586
if r.before_change_hook?
587
r.before_change_hook(@, r.old_val, r.query, r.account_id, cb)
588
else
589
cb()
590
(cb) =>
591
@_user_set_query_main_query(r, cb)
592
(cb) =>
593
if r.on_change_hook?
594
r.on_change_hook(@, r.old_val, r.query, r.account_id, cb)
595
else
596
cb()
597
], (err) => opts.cb(err))
598
599
# _mod_fields determines whether any fields in the query might actually get modified
600
# in the database when we do the query; e.g., account_id won't since it gets
601
# filled in with the user's account_id, and project_write won't since it must
602
# refer to an existing project. We use _mod_fields **only** to skip doing
603
# no-op queries. It's just an optimization.
604
_mod_fields: (query, client_query) =>
605
for field in misc.keys(query)
606
if client_query.set.fields[field] not in ['account_id', 'project_write']
607
return true
608
return false
609
610
_user_get_query_json_timestamps: (obj, fields) =>
611
# obj is an object returned from the database via a query
612
# Postgres JSONB doesn't support timestamps, so we convert
613
# every json leaf node of obj that looks like JSON of a timestamp
614
# to a Javascript Date.
615
for k, v of obj
616
if fields[k]
617
obj[k] = misc.fix_json_dates(v, fields[k])
618
619
# fill in the default values for obj using the client_query spec.
620
_user_get_query_set_defaults: (client_query, obj, fields) =>
621
if not misc.is_array(obj)
622
obj = [obj]
623
else if obj.length == 0
624
return
625
s = client_query?.get?.fields ? {}
626
for k in fields
627
v = s[k]
628
if v?
629
# k is a field for which a default value (=v) is provided in the schema
630
for x in obj
631
# For each obj pulled from the database that is defined...
632
if x?
633
# We check to see if the field k was set on that object.
634
y = x[k]
635
if not y?
636
# It was NOT set, so we deep copy the default value for the field k.
637
x[k] = misc.deep_copy(v)
638
else if typeof(v) == 'object' and typeof(y) == 'object' and not misc.is_array(v)
639
# y *is* defined and is an object, so we merge in the provided defaults.
640
for k0, v0 of v
641
if not y[k0]?
642
y[k0] = v0
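# Illustrative example (an assumed schema entry): if client_query.get.fields.settings
# has the default value {zoom:1, theme:'light'} and the database returned
# obj = {settings:{zoom:2}}, then after this call obj.settings is
# {zoom:2, theme:'light'} -- missing leaves are filled in, existing ones are kept.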
643
644
_user_set_query_project_users: (obj, account_id) =>
645
dbg = @_dbg("_user_set_query_project_users")
646
if not obj.users?
647
# nothing to do -- not changing users.
648
return
649
##dbg("disabled")
650
##return obj.users
651
# - ensures all keys of users are valid uuid's (though not that they are valid users).
652
# - and format is:
653
# {group:'owner' or 'collaborator', hide:bool, upgrades:{a map}}
654
# with valid upgrade fields.
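# Illustrative example (the uuids and upgrade names are assumptions) of a users
# object that would pass this check:
#
#   users =
#       '00000000-0000-4000-8000-000000000001' : {group:'owner'}
#       '00000000-0000-4000-8000-000000000002' : {group:'collaborator', hide:true, upgrades:{memory:1000}}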
655
upgrade_fields = PROJECT_UPGRADES.params
656
users = {}
657
# TODO: we obviously should check that a user is only changing the part
658
# of this object involving themselves... or adding/removing collaborators.
659
# That is not currently done below. TODO TODO TODO SECURITY.
660
for id, x of obj.users
661
if misc.is_valid_uuid_string(id)
662
for key in misc.keys(x)
663
if key not in ['group', 'hide', 'upgrades', 'ssh_keys']
664
throw Error("unknown field '#{key}")
665
if x.group? and (x.group not in ['owner', 'collaborator'])
666
throw Error("invalid value for field 'group'")
667
if x.hide? and typeof(x.hide) != 'boolean'
668
throw Error("invalid type for field 'hide'")
669
if x.upgrades?
670
if not misc.is_object(x.upgrades)
671
throw Error("invalid type for field 'upgrades'")
672
for k,_ of x.upgrades
673
if not upgrade_fields[k]
674
throw Error("invalid upgrades field '#{k}'")
675
if x.ssh_keys
676
# do some checks.
677
if not misc.is_object(x.ssh_keys)
678
throw Error("ssh_keys must be an object")
679
for fingerprint, key of x.ssh_keys
680
if not key # deleting
681
continue
682
if not misc.is_object(key)
683
throw Error("each key in ssh_keys must be an object")
684
for k, v of key
685
# the two dates are just numbers not actual timestamps...
686
if k not in ['title', 'value', 'creation_date', 'last_use_date']
687
throw Error("invalid ssh_keys field '#{k}'")
688
users[id] = x
689
return users
690
691
project_action: (opts) =>
692
opts = defaults opts,
693
project_id : required
694
action_request : required # action is a pair
695
cb : required
696
if opts.action_request.action == 'test'
697
# used for testing -- shouldn't trigger anything to happen.
698
opts.cb()
699
return
700
dbg = @_dbg("project_action(project_id=#{opts.project_id},action_request=#{misc.to_json(opts.action_request)})")
701
dbg()
702
project = undefined
703
action_request = misc.copy(opts.action_request)
704
set_action_request = (cb) =>
705
dbg("set action_request to #{misc.to_json(action_request)}")
706
@_query
707
query : "UPDATE projects"
708
where : 'project_id = $::UUID':opts.project_id
709
jsonb_set : {action_request : action_request}
710
cb : cb
711
async.series([
712
(cb) =>
713
action_request.started = new Date()
714
set_action_request(cb)
715
(cb) =>
716
dbg("get project")
717
@compute_server.project
718
project_id : opts.project_id
719
cb : (err, x) =>
720
project = x; cb(err)
721
(cb) =>
722
dbg("doing action")
723
switch action_request.action
724
when 'save'
725
project.save
726
min_interval : 1 # allow frequent explicit save (just an rsync)
727
cb : cb
728
when 'restart'
729
project.restart
730
cb : cb
731
when 'stop'
732
project.stop
733
cb : cb
734
when 'start'
735
project.start
736
cb : cb
737
when 'close'
738
project.close
739
cb : cb
740
else
741
cb("action '#{opts.action_request.action}' not implemented")
742
], (err) =>
743
if err
744
action_request.err = err
745
action_request.finished = new Date()
746
dbg("finished!")
747
set_action_request()
748
)
749
750
# This hook is called *before* the user commits a change to a project in the database
751
# via a user set query.
752
# TODO: Add a pre-check here as well that total upgrade isn't going to be exceeded.
753
# This will avoid a possible subtle edge case if user is cheating and always somehow
754
# crashes server...?
755
_user_set_query_project_change_before: (old_val, new_val, account_id, cb) =>
756
dbg = @_dbg("_user_set_query_project_change_before #{account_id}, #{misc.to_json(old_val)} --> #{misc.to_json(new_val)}")
757
dbg()
758
759
if new_val?.action_request? and (new_val.action_request.time - (old_val?.action_request?.time ? 0) != 0)
760
# Requesting an action, e.g., save, restart, etc.
761
dbg("action_request -- #{misc.to_json(new_val.action_request)}")
762
#
763
# WARNING: Above, we compare the times by taking their difference, since != doesn't work as we want with
764
# separate Date objects, as it will say equal dates are not equal. Example:
765
# coffee> x = JSON.stringify(new Date()); {from_json}=require('misc'); a=from_json(x); b=from_json(x); [a!=b, a-b]
766
# [ true, 0 ]
767
768
# Launch the action -- success or failure communicated back to all clients through changes to state.
769
# Also, we don't have to worry about permissions here; that this function got called at all means
770
# the user has write access to the projects table entry with given project_id, which gives them permission
771
# to do any action with the project.
772
@project_action
773
project_id : new_val.project_id
774
action_request : misc.copy_with(new_val.action_request, ['action', 'time'])
775
cb : (err) =>
776
dbg("action_request #{misc.to_json(new_val.action_request)} completed -- #{err}")
777
cb()
778
return
779
780
if not new_val.users? # not changing users
781
cb(); return
782
old_val = old_val?.users ? {}
783
new_val = new_val?.users ? {}
784
for id in misc.keys(old_val).concat(misc.keys(new_val))
785
if account_id != id
786
# make sure user doesn't change anybody else's allocation
787
if not underscore.isEqual(old_val?[id]?.upgrades, new_val?[id]?.upgrades)
788
err = "user '#{account_id}' tried to change user '#{id}' allocation toward a project"
789
dbg(err)
790
cb(err)
791
return
792
cb()
793
794
# This hook is called *after* the user commits a change to a project in the database
795
# via a user set query. It could undo changes the user isn't allowed to make, which
796
# might require doing various async calls, or take actions (e.g., setting quotas,
797
# starting projects, etc.).
798
_user_set_query_project_change_after: (old_val, new_val, account_id, cb) =>
799
dbg = @_dbg("_user_set_query_project_change_after #{account_id}, #{misc.to_json(old_val)} --> #{misc.to_json(new_val)}")
800
dbg()
801
old_upgrades = old_val.users?[account_id]?.upgrades
802
new_upgrades = new_val.users?[account_id]?.upgrades
803
if new_upgrades? and not underscore.isEqual(old_upgrades, new_upgrades)
804
dbg("upgrades changed for #{account_id} from #{misc.to_json(old_upgrades)} to #{misc.to_json(new_upgrades)}")
805
project = undefined
806
async.series([
807
(cb) =>
808
@ensure_user_project_upgrades_are_valid
809
account_id : account_id
810
cb : cb
811
(cb) =>
812
if not @compute_server?
813
cb()
814
else
815
dbg("get project")
816
@compute_server.project
817
project_id : new_val.project_id
818
cb : (err, p) =>
819
project = p; cb(err)
820
(cb) =>
821
if not project?
822
cb()
823
else
824
dbg("determine total quotas and apply")
825
project.set_all_quotas(cb:cb)
826
], cb)
827
else
828
cb()
829
830
###
831
GET QUERIES
832
###
833
834
# Make any functional substitutions defined by the schema.
835
# This may mutate query in place.
836
_user_get_query_functional_subs: (query, fields) =>
837
if fields?
838
for field, val of fields
839
if typeof(val) == 'function'
840
query[field] = val(query, @)
841
842
_parse_get_query_opts: (opts) =>
843
if opts.changes? and not opts.changes.cb?
844
return {err: "user_get_query -- if opts.changes is specified, then opts.changes.cb must also be specified"}
845
846
r = {}
847
# get data about user queries on this table
848
if opts.project_id?
849
r.client_query = SCHEMA[opts.table]?.project_query
850
else
851
r.client_query = SCHEMA[opts.table]?.user_query
852
853
if not r.client_query?.get?
854
return {err: "get queries not allowed for table '#{opts.table}'"}
855
856
if not opts.account_id? and not opts.project_id? and not SCHEMA[opts.table].anonymous
857
return {err: "anonymous get queries not allowed for table '#{opts.table}'"}
858
859
r.table = SCHEMA[opts.table].virtual ? opts.table
860
861
r.primary_keys = @_primary_keys(opts.table)
862
863
# Are only admins allowed any get access to this table?
864
r.require_admin = !!r.client_query.get.admin
865
866
# Verify that all requested fields may be read by users
867
for field in misc.keys(opts.query)
868
if r.client_query.get.fields?[field] == undefined
869
return {err: "user get query not allowed for #{opts.table}.#{field}"}
870
871
# Functional substitutions defined by schema
872
@_user_get_query_functional_subs(opts.query, r.client_query.get?.fields)
873
874
if r.client_query.get?.instead_of_query?
875
return r
876
877
# Make sure there is a pg_where filter that restricts to only things in this table that this user
878
# is allowed to see, or at least a check_hook.
879
if not r.client_query.get.pg_where? and not r.client_query.get.check_hook?
880
return {err: "user get query not allowed for #{opts.table} (no getAll filter)"}
881
882
# Apply default options to the get query (don't impact changefeed)
883
# The user can override these, e.g., if they want to explicitly increase a limit
884
# to get more file use history.
885
r.delete_option = false # will be true if an option is delete
886
user_options = {}
887
for x in opts.options
888
for y, z of x
889
if y == 'delete'
890
r.delete_option = z
891
else
892
user_options[y] = true
893
894
if r.client_query.get.options?
895
# complicated since options is a list of {opt:val} !
896
for x in r.client_query.get.options
897
for y, z of x
898
if y == 'delete'
899
r.delete_option = z
900
else
901
if not user_options[y]
902
opts.options.push(x)
903
break
904
905
if opts.changes? and r.delete_option
906
return {err: "user_get_query -- if opts.changes is specified, then delete option must not be specified"}
907
908
r.json_fields = @_json_fields(opts.table, opts.query)
909
910
return r
911
912
# _json_fields: map from field names to array of fields that should be parsed as timestamps
913
# The keys of this map are also used by _user_query_set_upsert_and_jsonb_merge to determine
914
# JSON deep merging for set queries.
915
_json_fields: (table, query) =>
916
json_fields = {}
917
for field, info of SCHEMA[table].fields
918
if (query[field]? or query[field] == null) and (info.type == 'map' or info.pg_type == 'JSONB')
919
json_fields[field] = info.date ? []
920
return json_fields
921
922
_user_get_query_where: (client_query, account_id, project_id, user_query, table, cb) =>
923
dbg = @_dbg("_user_get_query_where")
924
dbg()
925
926
pg_where = client_query.get.pg_where
927
if not pg_where?
928
pg_where = []
929
if pg_where == 'projects'
930
pg_where = ['projects']
931
932
if typeof(pg_where) == 'function'
933
pg_where = pg_where(user_query, @)
934
if not misc.is_array(pg_where)
935
cb("pg_where must be an array (of strings or objects)")
936
return
937
938
# Do NOT mutate the schema itself!
939
pg_where = misc.deep_copy(pg_where)
940
941
# expand 'projects' in query, depending on whether project_id is specified or not.
942
# This is just a convenience to make the db schema simpler.
943
for i in [0...pg_where.length]
944
if pg_where[i] == 'projects'
945
if user_query.project_id
946
pg_where[i] = {"project_id = $::UUID" : 'project_id'}
947
else
948
pg_where[i] = {"project_id = ANY(select project_id from projects where users ? $::TEXT)" : 'account_id'}
949
950
# Now we fill in all the parameterized substitutions in the pg_where list.
951
subs = {}
952
for x in pg_where
953
if misc.is_object(x)
954
for key, value of x
955
subs[value] = value
956
957
sub_value = (value, cb) =>
958
switch value
959
when 'account_id'
960
if not account_id?
961
cb('account_id must be given')
962
return
963
subs[value] = account_id
964
cb()
965
when 'project_id'
966
if project_id?
967
subs[value] = project_id
968
cb()
969
else if not user_query.project_id
970
cb("must specify project_id")
971
else if SCHEMA[table].anonymous
972
subs[value] = user_query.project_id
973
cb()
974
else
975
@user_is_in_project_group
976
account_id : account_id
977
project_id : user_query.project_id
978
groups : ['owner', 'collaborator']
979
cb : (err, in_group) =>
980
if err
981
cb(err)
982
else if in_group
983
subs[value] = user_query.project_id
984
cb()
985
else
986
cb("you do not have read access to this project")
987
when 'project_id-public'
988
if not user_query.project_id?
989
cb("must specify project_id")
990
else
991
if SCHEMA[table].anonymous
992
@has_public_path
993
project_id : user_query.project_id
994
cb : (err, has_public_path) =>
995
if err
996
cb(err)
997
else if not has_public_path
998
cb("project does not have any public paths")
999
else
1000
subs[value] = user_query.project_id
1001
cb()
1002
else
1003
cb("table must allow anonymous queries")
1004
else
1005
cb()
1006
1007
async.map misc.keys(subs), sub_value, (err) =>
1008
if err
1009
cb(err)
1010
return
1011
for x in pg_where
1012
if misc.is_object(x)
1013
for key, value of x
1014
x[key] = subs[value]
1015
1016
# impose further restrictions (more where conditions)
1017
pg_where.push(@_user_get_query_filter(user_query, client_query))
1018
1019
cb(undefined, pg_where)
1020
1021
# Additional where object condition imposed by user's get query
1022
_user_get_query_filter: (user_query, client_query) =>
1023
# If the schema lists the value in a get query as 'null', then we remove it;
1024
# null means it was only there to be used by the initial where filter
1025
# part of the query.
1026
for field, val of client_query.get.fields
1027
if val == 'null'
1028
delete user_query[field]
1029
1030
where = {}
1031
for field, val of user_query
1032
if val?
1033
if @_query_is_cmp(val)
1034
# A comparison, e.g.,
1035
# field :
1036
# '<=' : 5
1037
# '>=' : 2
1038
for op, v of val
1039
if op == '==' # not in SQL, but natural for our clients to use it
1040
op = '='
1041
where["#{quote_field(field)} #{op} $"] = v
1042
else
1043
where["#{quote_field(field)} = $"] = val
1044
1045
return where
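# Illustrative example (field names assumed): a user_query of
#   {project_id:'...', last_edited:{'>=':cutoff}, deleted:null}
# yields roughly
#   {'"project_id" = $':'...', '"last_edited" >= $':cutoff}
# The null-valued field adds no condition; quote_field is assumed to
# double-quote identifiers.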
1046
1047
_user_get_query_options: (delete_option, options, multi, schema_options) =>
1048
r = {}
1049
1050
if schema_options?
1051
options = options.concat(schema_options)
1052
1053
# Parse option part of the query
1054
{limit, order_by, slice, err} = @_query_parse_options(options)
1055
1056
if err
1057
return {err: err}
1058
if limit?
1059
r.limit = limit
1060
else if not multi
1061
r.limit = 1
1062
if order_by?
1063
r.order_by = order_by
1064
if slice?
1065
return {err: "slice not implemented"}
1066
return r
1067
1068
_user_get_query_do_query: (query_opts, client_query, user_query, multi, json_fields, cb) =>
1069
query_opts.cb = all_results (err, x) =>
1070
if err
1071
cb(err)
1072
else
1073
if misc.len(json_fields) > 0
1074
# Convert timestamps to Date objects, if **explicitly** specified in the schema
1075
for obj in x
1076
@_user_get_query_json_timestamps(obj, json_fields)
1077
1078
if not multi
1079
x = x[0]
1080
# Fill in default values and remove null's
1081
@_user_get_query_set_defaults(client_query, x, misc.keys(user_query))
1082
# Get rid of undefined fields -- that's the default and wastes memory and bandwidth
1083
if x?
1084
for obj in x
1085
misc.map_mutate_out_undefined(obj)
1086
cb(undefined, x)
1087
@_query(query_opts)
1088
1089
_user_get_query_query: (delete_option, table, user_query) =>
1090
if delete_option
1091
return "DELETE FROM #{table}"
1092
else
1093
return "SELECT #{(quote_field(field) for field in @_user_get_query_columns(user_query)).join(',')} FROM #{table}"
1094
1095
_user_get_query_satisfied_by_obj: (user_query, obj, possible_time_fields) =>
1096
for field, value of obj
1097
date_keys = possible_time_fields[field]
1098
if date_keys
1099
value = misc.fix_json_dates(value, date_keys)
1100
if (q = user_query[field])?
1101
if (op = @_query_is_cmp(q))
1102
x = q[op]
1103
switch op
1104
when '=='
1105
if value != x
1106
return false
1107
when '!='
1108
if value == x
1109
return false
1110
when '>='
1111
if value < x
1112
return false
1113
when '<='
1114
if value > x
1115
return false
1116
when '>'
1117
if value <= x
1118
return false
1119
when '<'
1120
if value >= x
1121
return false
1122
else if value != q
1123
return false
1124
return true
1125
1126
_user_get_query_changefeed: (changes, table, primary_keys, user_query,
1127
where, json_fields, account_id, client_query, cb) =>
1128
dbg = @_dbg("_user_get_query_changefeed(table='#{table}')")
1129
dbg()
1130
if not misc.is_object(changes)
1131
cb("changes must be an object with keys id and cb")
1132
return
1133
if not misc.is_valid_uuid_string(changes.id)
1134
cb("changes.id must be a uuid")
1135
return
1136
if typeof(changes.cb) != 'function'
1137
cb("changes.cb must be a function")
1138
return
1139
for primary_key in primary_keys
1140
if not user_query[primary_key]? and user_query[primary_key] != null
1141
cb("changefeed MUST include primary key (='#{primary_key}') in query")
1142
return
1143
watch = []
1144
select = {}
1145
init_tracker = tracker = undefined
1146
possible_time_fields = misc.deep_copy(json_fields)
1147
1148
for field, val of user_query
1149
type = pg_type(SCHEMA[table]?.fields?[field])
1150
if type == 'TIMESTAMP'
1151
possible_time_fields[field] = 'all'
1152
if val == null and field not in primary_keys
1153
watch.push(field)
1154
else
1155
select[field] = type
1156
1157
if misc.len(possible_time_fields) > 0
1158
# Convert (likely) timestamps to Date objects.
1159
process = (x) =>
1160
if not x?
1161
return
1162
if x.new_val?
1163
@_user_get_query_json_timestamps(x.new_val, possible_time_fields)
1164
@_user_get_query_set_defaults(client_query, x.new_val, misc.keys(user_query))
1165
if x.old_val?
1166
@_user_get_query_json_timestamps(x.old_val, possible_time_fields)
1167
else
1168
process = -> # no-op
1169
1170
async.series([
1171
(cb) =>
1172
# check for alternative where test for changefeed.
1173
pg_changefeed = client_query?.get?.pg_changefeed
1174
if not pg_changefeed?
1175
cb(); return
1176
if pg_changefeed == 'projects'
1177
pg_changefeed = (db, account_id) =>
1178
where : (obj) =>
1179
# Check that this is a project we have read access to
1180
if not db._project_and_user_tracker?.projects(account_id)[obj.project_id]
1181
return false
1182
# Now check our actual query conditions on the object.
1183
# This would normally be done by the changefeed, but since
1184
# we are passing in a custom where, we have to do it.
1185
if not @_user_get_query_satisfied_by_obj(user_query, obj, possible_time_fields)
1186
return false
1187
return true
1188
1189
select : {'project_id':'UUID'}
1190
1191
init_tracker : (tracker, feed) =>
1192
tracker.on 'add_user_to_project', (x) =>
1193
if x.account_id == account_id
1194
feed.insert({project_id:x.project_id})
1195
tracker.on 'remove_user_from_project', (x) =>
1196
if x.account_id == account_id
1197
feed.delete({project_id:x.project_id})
1198
1199
if pg_changefeed == 'one-hour'
1200
pg_changefeed = ->
1201
where : (obj) ->
1202
if obj.time?
1203
return new Date(obj.time) >= misc.hours_ago(1)
1204
else
1205
return true
1206
select : {id:'UUID', time:'TIMESTAMP'}
1207
1208
if pg_changefeed == 'collaborators'
1209
if not account_id?
1210
cb("account_id must be given")
1211
return
1212
pg_changefeed = (db, account_id) ->
1213
shared_tracker = undefined
1214
where : (obj) -> # client side test of "is a collab with me"
1215
return shared_tracker.collabs(account_id)?[obj.account_id]
1216
init_tracker : (tracker, feed) =>
1217
shared_tracker = tracker
1218
tracker.on 'add_collaborator', (x) =>
1219
if x.account_id == account_id
1220
feed.insert({account_id:x.collab_id})
1221
tracker.on 'remove_collaborator', (x) =>
1222
if x.account_id == account_id
1223
feed.delete({account_id:x.collab_id})
1224
1225
1226
x = pg_changefeed(@, account_id)
1227
if x.init_tracker?
1228
init_tracker = x.init_tracker
1229
if x.select?
1230
for k, v of x.select
1231
select[k] = v
1232
1233
if x.where? or x.init_tracker?
1234
where = x.where
1235
if not account_id?
1236
cb()
1237
return
1238
# initialize the user tracker, which is needed for the where tests...
1239
@project_and_user_tracker cb : (err, _tracker) =>
1240
if err
1241
cb(err)
1242
else
1243
tracker = _tracker
1244
tracker.register(account_id: account_id, cb:cb)
1245
else
1246
cb()
1247
(cb) =>
1248
@changefeed
1249
table : table
1250
select : select
1251
where : where
1252
watch : watch
1253
cb : (err, feed) =>
1254
if err
1255
cb(err)
1256
return
1257
feed.on 'change', (x) ->
1258
process(x)
1259
changes.cb(undefined, x)
1260
feed.on 'close', ->
1261
changes.cb(undefined, {action:'close'})
1262
feed.on 'error', (err) ->
1263
changes.cb("feed error - #{err}")
1264
@_changefeeds ?= {}
1265
@_changefeeds[changes.id] = feed
1266
init_tracker?(tracker, feed)
1267
# Any tracker error means this changefeed is now broken and
1268
# has to be recreated.
1269
tracker?.on 'error', (err) ->
1270
changes.cb("tracker error - #{err}")
1271
cb()
1272
], cb)
1273
1274
user_get_query: (opts) =>
1275
opts = defaults opts,
1276
account_id : undefined
1277
project_id : undefined
1278
table : required
1279
query : required
1280
multi : required
1281
options : required # used for initial query; **IGNORED** by changefeed.
1282
# (The old heartbeat option ensured that *something* was sent every n minutes, in case no
1283
# changes were coming out of the changefeed, as an additional
1284
# measure in case the client somehow didn't get a "this changefeed died" message; it is now ignored.)
1285
# Use [{delete:true}] to instead delete the selected records (must
1286
# have delete:true in schema).
1287
changes : undefined # {id:?, cb:?}
1288
cb : required # cb(err, result)
1289
###
1290
The general idea is that user get queries are of the form
1291
1292
SELECT [columns] FROM table WHERE [get_all] AND [further restrictions] LIMIT/slice
1293
1294
Using the whitelist rules specified in SCHEMA, we
1295
determine each of the above, then run the query.
1296
1297
If no error in query, and changes is a given uuid, set up a change
1298
feed that calls opts.cb on changes as well.
1299
###
1300
dbg = @_dbg("user_get_query(table='#{opts.table}')")
1301
dbg("account_id='#{opts.account_id}', project_id='#{opts.project_id}', query=#{misc.to_json(opts.query)}, multi=#{opts.multi}, options=#{misc.to_json(opts.options)}, changes=#{misc.to_json(opts.changes)}")
1302
{err, table, client_query, require_admin, delete_option, primary_keys, json_fields} = @_parse_get_query_opts(opts)
1303
1304
if err
1305
opts.cb(err)
1306
return
1307
if client_query.get.instead_of_query?
1308
# Custom version: instead of doing a full query, we instead
1309
# call a function and that's it.
1310
client_query.get.instead_of_query(@, opts.query, opts.account_id, opts.cb)
1311
return
1312
1313
_query_opts = {} # this will be the input to the @_query command.
1314
result = undefined
1315
async.series([
1316
(cb) =>
1317
if client_query.get.check_hook?
1318
client_query.get.check_hook(@, opts.query, opts.account_id, opts.project_id, cb)
1319
else
1320
cb()
1321
(cb) =>
1322
if require_admin
1323
@_require_is_admin(opts.account_id, cb)
1324
else
1325
cb()
1326
(cb) =>
1327
# NOTE: _user_get_query_where may mutate opts.query (for 'null' params)
1328
# so it is important that this is called before @_user_get_query_query below.
1329
# See the TODO in @_user_get_query_filter.
1330
@_user_get_query_where client_query, opts.account_id, opts.project_id, opts.query, opts.table, (err, where) =>
1331
_query_opts.where = where
1332
cb(err)
1333
(cb) =>
1334
_query_opts.query = @_user_get_query_query(delete_option, table, opts.query)
1335
x = @_user_get_query_options(delete_option, opts.options, opts.multi, client_query.options)
1336
if x.err
1337
cb(x.err)
1338
return
1339
misc.merge(_query_opts, x)
1340
1341
if opts.changes?
1342
@_user_get_query_changefeed(opts.changes, table, primary_keys,
1343
opts.query, _query_opts.where, json_fields,
1344
opts.account_id, client_query, cb)
1345
else
1346
cb()
1347
(cb) =>
1348
@_user_get_query_do_query _query_opts, client_query, opts.query, opts.multi, json_fields, (err, x) =>
1349
result = x; cb(err)
1350
], (err) =>
1351
opts.cb(err, result if not err)
1352
)
1353
1354
###
1355
Synchronized strings
1356
###
1357
_user_set_query_syncstring_change_after: (old_val, new_val, account_id, cb) =>
1358
dbg = @_dbg("_user_set_query_syncstring_change_after")
1359
cb() # return immediately -- stuff below can happen as side effect in the background.
1360
# Now do the following reactions to this syncstring change in the background:
1361
# 1. Awaken the relevant project.
1362
project_id = old_val?.project_id ? new_val?.project_id
1363
if project_id? and (new_val?.save?.state == 'requested' or (new_val?.last_active? and new_val?.last_active != old_val?.last_active))
1364
dbg("awakening project #{project_id}")
1365
awaken_project(@, project_id)
1366
1367
# 2. Log that this particular file is being used/accessed; this is used only
1368
# longterm for analytics. Note that log_file_access is throttled.
1369
# Also, record in a local cache that the user has permission to write
1370
# to this syncstring.
1371
if project_id? and new_val?.last_active?
1372
filename = old_val?.path
1373
if filename? and account_id?
1374
@log_file_access
1375
project_id : project_id
1376
account_id : account_id
1377
filename : filename
1378
1379
# Verify that writing a patch is allowed.
1380
_user_set_query_patches_check: (obj, account_id, project_id, cb) =>
1381
# Reject any patch that is too new
1382
if obj.time - new Date() > MAX_PATCH_FUTURE_MS
1383
cb("clock") # this exact error is assumed in synctable!
1384
return
1385
# Write access
1386
@_syncstring_access_check(obj.string_id, account_id, project_id, cb)
1387
1388
# Verify that reading patches is allowed.
1389
_user_get_query_patches_check: (obj, account_id, project_id, cb) =>
1390
# Write access (no notion of read only yet -- will be easy to add later)
1391
@_syncstring_access_check(obj.string_id, account_id, project_id, cb)
1392
1393
# Verify that writing cursor positions is allowed.
1394
_user_set_query_cursors_check: (obj, account_id, project_id, cb) =>
1395
@_syncstring_access_check(obj.string_id, account_id, project_id, cb)
1396
1397
# Verify that reading cursor positions is allowed.
1398
_user_get_query_cursors_check: (obj, account_id, project_id, cb) =>
1399
@_syncstring_access_check(obj.string_id, account_id, project_id, cb)
1400
1401
_syncstring_access_check: (string_id, account_id, project_id, cb) =>
1402
# Check that string_id is the id of a syncstring the given account_id or
1403
# project_id is allowed to write to. NOTE: We do not concern ourselves (for now at least)
1404
# with proof of identity (i.e., one user with full read/write access to a project
1405
# claiming they are another user of that project), since our security model
1406
# is that any user of a project can edit anything there. In particular, the
1407
# synctable lets any user with write access to the project edit the users field.
1408
if string_id?.length != 40
1409
cb("string_id (='#{string_id}') must be a string of length 40")
1410
return
1411
@_query
1412
query : "SELECT project_id FROM syncstrings"
1413
where : "string_id = $::CHAR(40)" : string_id
1414
cache : true
1415
cb : one_result 'project_id', (err, x) =>
1416
if err
1417
cb(err)
1418
else if not x
1419
# There is no such syncstring with this id -- fail
1420
cb("no such syncstring")
1421
else if account_id?
1422
# Attempt to write by a user browser client
1423
@_require_project_ids_in_groups(account_id, [x], ['owner', 'collaborator'], cb)
1424
else if project_id?
1425
# Attempt to write by a *project*
1426
if project_id == x
1427
cb()
1428
else
1429
cb("project not allowed to write to syncstring in different project")
1430
1431
1432
# Check permissions for querying for syncstrings in a project
1433
_syncstrings_check: (obj, account_id, project_id, cb) =>
1434
#dbg = @dbg("_syncstrings_check")
1435
#dbg(misc.to_json([obj, account_id, project_id]))
1436
if not misc.is_valid_uuid_string(obj?.project_id)
1437
cb("project_id (='#{obj?.project_id}') must be a valid uuid")
1438
return
1439
if project_id?
1440
if project_id == obj.project_id
1441
# The project can access its own syncstrings
1442
cb()
1443
else
1444
cb("projects can only access their own syncstrings") # for now at least!
1445
return
1446
if account_id?
1447
# Access request by a client user
1448
@_require_project_ids_in_groups(account_id, [obj.project_id], ['owner', 'collaborator'], cb)
1449
else
1450
cb("only users and projects can access syncstrings")
1451
1452
_last_awaken_time = {}
1453
awaken_project = (db, project_id) ->
1454
# throttle so that this gets called *for a given project* at most once every 30s.
1455
now = new Date()
1456
if _last_awaken_time[project_id]? and now - _last_awaken_time[project_id] < 30000
1457
return
1458
_last_awaken_time[project_id] = now
1459
dbg = db._dbg("_awaken_project(project_id=#{project_id})")
1460
if not db.compute_server?
1461
dbg("skipping since no compute_server defined")
1462
return
1463
dbg("doing it...")
1464
db.compute_server.project
1465
project_id : project_id
1466
cb : (err, project) =>
1467
if err
1468
dbg("err = #{err}")
1469
else
1470
dbg("requesting whole-project save")
1471
project.save() # this causes saves of all files to storage machines to happen periodically
1472
project.ensure_running
1473
cb : (err) =>
1474
if err
1475
dbg("failed to ensure running")
1476
else
1477
dbg("also make sure there is a connection from hub to project")
1478
# This is so the project can find out that the user wants to save a file (etc.)
1479
db.ensure_connection_to_project?(project_id)
1480
1481
1482