www / msri07 / sched.py
Author: William A. Stein
######################################################################
# This file was *autogenerated* from the file sched.sage.
######################################################################
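# Note: this file is meant to be loaded into a Sage session; the names used
# below that are not defined here (Integer from the Sage preparser, the os
# module, and misc.word_wrap) are expected to come from that environment.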
class Speaker:
    def __init__(self, name, employer=None, url=None, email=None, status=None, title=None, abstract=None):
        self.name = name
        self.employer = employer
        self.email = email
        self.url = url
        self.title = title
        self.abstract = abstract
        self.status = status
        if status != 'invited' and status != 'contributed':
            raise ValueError, '%s: "%s"'%(name, status)

    def __repr__(self):
        return self.name

    def last_name(self):
        name = self.name
        if ' and ' in self.name:
            name = self.name.split(' and ')[Integer(0)]
        return ' '.join(name.split()[Integer(1):])

    def first_name(self):
        return self.name.split()[Integer(0)]

    def __cmp__(self, right):
        return cmp(self.last_name(), right.last_name())

    def tag(self):
        return self.name.replace(' ','')

    def html_full(self):
        return """
<a name="%s">
<h2><font color="darkred"><b>%s</b></font>:
<font color="#333333"><i>%s</i></font><br></h2>
%s<br><br>
%s"""%(
            self.tag(), self.last_name(), self.title, self.html_name(), self.html_abstract())

    def html_abstract(self):
        s = self.abstract.replace('\n\n','<p>').replace('``','"').replace("''",'"')
        if '*' in s:
            i = s.find('*')
            s = s.replace('*', '<li>')
            s = s[:i] + '<ul>' + s[i:]
            s += '</ul>'
        return s

    def html_short(self):
        return '%s<br><b><i>%s</i></b>'%(self.html_name(), self.html_title())

    def html_name(self):
        if self.url:
            url = self.url
        else:
            url = 'mailto:%s'%self.email
        if self.employer:
            emp = ' - %s'%self.employer
        else:
            emp = ''
        return '<a href="%s">%s %s</a>'%(url, self.name, emp)

    def html_title(self):
        return '<a href="#%s">%s</a>'%(self.tag(), self.title)


    ############################################################################


    def wiki_full(self):
        return """== %s: %s ==

[%s %s %s]

%s
"""%(self.last_name(), self.title, self.wiki_url(), self.name, self.wiki_employer(), self.wiki_abstract())

    def wiki_employer(self):
        if self.employer == '':
            return ''
        else:
            return '- %s'%self.employer

    def wiki_abstract(self):
        ab = self.abstract.replace('``','"').replace("''",'"')
        if '*' in ab:
            return ab.strip()
        return misc.word_wrap(ab).strip()

    def wiki_short(self):
        return '%s<br>%s'%(self.wiki_name(), self.wiki_title())

    def wiki_url(self):
        if self.url:
            url = self.url
        else:
            url = 'mailto:%s'%self.email
        return url


    ############################################################################

    def latex_full(self):
        s = """
{\\large \\bf %s: {\\em\sf %s}}\\vspace{1ex}\\newline
{\em %s}\\vspace{1ex}\\newline
\url{%s}\\vspace{1ex}\\newline
{%s}
"""%(
            self.last_name(), self.title, self.latex_name(), self.latex_url(), self.latex_abstract())
        return s.strip()

    def latex_abstract(self):
        s = self.abstract
        if '*' in s:
            i = s.find('*')
            s = s.replace('*', '\\item')
            s = s[:i] + '\\begin{itemize}' + s[i:]
            s += '\\end{itemize}'
        return s

    def latex_short(self):
        return '%s - {\\em\\sf %s}'%(self.last_name(), self.title)

    def latex_name(self):
        if self.employer:
            emp = ' - %s'%self.employer
        else:
            emp = ''
        return '%s %s'%(self.name, emp)

    def latex_url(self):
        if self.url:
            return self.url
        return self.email
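
# Illustrative sketch (not part of the workshop data): a made-up record showing
# how one Speaker feeds the three back ends defined above.
def _example_speaker():
    s = Speaker('Jane Doe', 'Example University', 'http://example.org/', '',
                'contributed', 'A Sample Talk',
                'Overview paragraph with *a first point and *a second point.')
    return s.html_full(), s.wiki_full(), s.latex_full()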

speakers = [\
    Speaker('David Bailey',
        'Lawrence Berkeley Labs (LBL)',
        'http://crd.lbl.gov/~dhbailey/',
        '',
        'invited',
        'Experimental Mathematics and High-Performance Computing',
        """
Recent developments in ``experimental mathematics'' have underscored the value of high-performance computing in modern mathematical research. The most frequent computations that arise here are high-precision (typically several-hundred-digit accuracy) evaluations of integrals and series, together with integer relation detections using the ``PSLQ'' algorithm. Some recent highlights in this arena include: (2) the discovery of ``BBP''-type formulas for various mathematical constants, including pi and log(2); (3) the discovery of analytic evaluations for several classes of multivariate zeta sums; (4) the discovery of Apery-like formulas for the Riemann zeta function at integer arguments; and (5) the discovery of analytic evaluations and linear relations among certain classes of definite integrals that arise in mathematical physics. The talk will include a live demo of the ``experimental mathematician's toolkit''.
"""),
    Speaker('Robert Bradshaw',
        'University of Washington',
        '',
        '[email protected]',
        'contributed',
        'Loosely Dependent Parallel Processes',
        """
Many parallel computational algorithms involve dividing the problem into several smaller tasks and running each task in isolation in parallel. Often these tasks are the same procedure over a set of varying parameters. Inter-process communication might not be needed, but the results of one task may influence what subsequent tasks need to be performed. I will discuss the concept of job generators, or custom-written tasks that generate other tasks and process their feedback. I will discuss this specifically in the context of integer factorization.
"""),

    Speaker('Henry Cohn',
        'Microsoft Research',
        'http://research.microsoft.com/~cohn/',
        '',
        'invited',
        'Parallel Computation Tools for Research: A Wishlist',
        ''),

    Speaker('Gene Cooperman',
        'Northeastern University',
        'http://www.ccs.neu.edu/home/gene/',
        '',
        'invited',
        'Disk-Based Parallel Computing: A New Paradigm',
        """
One observes that 100 local commodity disks of an array have approximately the same streaming bandwidth as a single RAM subsystem. Hence, it is proposed to treat a cluster as if it were a single computer with tens of terabytes of data, and with RAM serving as cache for disk. This makes feasible the solution of truly large problems that are currently space-limited. We also briefly summarize other recent activities of our working group: lessons from supporting ParGAP and ParGCL; progress toward showing that 20 moves suffice to solve Rubik's cube; lessons about marshalling from support of ParGeant4 (parallelization of a million-line program at CERN); and experiences at the SCIEnce workshop (symbolic-computing.org), part of a 5-year, 3.2 million euro, European Union project. Our new distributed checkpointing package now provides a distributed analog of a SAVE-WORKSPACE command, for use in component-based symbolic software, such as SAGE."""),

    Speaker('Alan Edelman',
        'MIT',
        'http://www-math.mit.edu/~edelman/',
        '',
        'invited',
        'Interactive Parallel Supercomputing: Today: MATLAB(r) and Python; Coming Cutting Edge: Symbolic Parallelism with Mathematica(r) and MAPLE(r)',
        """Star-P is a unique technology offered by Interactive Supercomputing after
nurturing at MIT. Star-P through its abstractions is solving the ease of use
problem that has plagued supercomputing. Some of the innovative features of
Star-P are the ability to program in MATLAB, hook in task parallel codes
written using a processor free abstraction, hook in existing parallel codes,
and obtain the performance that represents the HPC promise. All this is
through a client/server interface. Other clients such as Python or R could
be possible. The MATLAB, Python, or R becomes the "browser." Parallel
computing remains challenging compared to serial coding, but it is now that
much easier compared to solutions such as MPI. Users of MPI can plug in
their previously written codes and libraries and continue forward in Star-P.

Numerical computing is challenging enough in a parallel environment;
symbolic computing will require even more research and more challenging
problems to be solved. In this talk we will demonstrate the possibilities
and the pitfalls.
"""),

    Speaker('Brian Granger',
        'Tech X Corp.',
        'http://txcorp.com',
        '',
        'invited',
        'Interactive Parallel Computing using Python and IPython',
        """
Interactive computing environments, such as Matlab, IDL and
Mathematica, are popular among researchers because their
interactive nature is well matched to the exploratory nature of
research. However, these systems have one critical weakness:
they are not designed to take advantage of parallel computing
hardware such as multi-core CPUs, clusters and supercomputers.
Thus, researchers usually turn to non-interactive compiled
languages, such as C/C++/Fortran, when parallelism is needed.

In this talk I will describe recent work on the IPython project
to implement a software architecture that allows parallel
applications to be developed, debugged, tested, executed and
monitored in a fully interactive manner using the Python
programming language. This system is fully functional and allows
many types of parallelism to be expressed, including message
passing (using MPI), task farming, shared memory, and custom user
defined approaches. I will describe the architecture, provide an
overview of its basic usage and then provide more sophisticated
examples of how it can be used in the development of new parallel
algorithms. Because IPython is one of the components of the SAGE
system, I will also discuss how IPython's parallel computing
capabilities can be used in that context.
"""),

    Speaker('Robert Harrison',
        'Oak Ridge National Lab',
        'http://www.csm.ornl.gov/ccsg/html/staff/harrison.html',
        '',
        'invited',
        'Science at the petascale: tools in the tool box',
        """
Petascale computing will require coordinating the actions of 100,000+
processors, and directing the flow of data between up to six levels
of memory hierarchy and along channels that differ by over a factor of
100 in bandwidth. Amdahl's law requires that petascale applications
have less than 0.001% sequential or replicated work in order to
be at least 50% efficient. These are profound challenges for all but
the most regular or embarrassingly parallel applications, yet we also
demand not just bigger and better, but fundamentally new science.
In this presentation I will discuss how we are attempting to confront
simultaneously the complexities of petascale computation while
increasing our scientific productivity. I hope that I can convince you
that our development of MADNESS (multiresolution adaptive numerical
scientific simulation) is not as crazy as it sounds.

This work is funded by the U.S. Department of Energy, the division of
Basic Energy Science, Office of Science, and was performed in part
using resources of the National Center for Computational Sciences, both
under contract DE-AC05-00OR22725 with Oak Ridge National Laboratory.
"""),

    Speaker('Bill Hart',
        'Warwick',
        'http://www.maths.warwick.ac.uk/~masfaw/',
        '',
        'invited',
        'Parallel Computation in Number Theory',
        """
This talk will have two sections. The first will
introduce a new library for number theory which is
under development, called FLINT. I will discuss the
various algorithms already available in FLINT, compare
them with similar implementations available elsewhere,
and speak about what the future holds for FLINT, with
the focus on parallel processing and integration into
Pari and the SAGE package.

The second part of the talk will focus on low level
implementation details of parallel algorithms in
number theory. In particular I will discuss the design
decisions that we have made so far in the FLINT
library to facilitate multicore and multiprocessor
platforms.

If time permits, there will be a live demonstration.
"""
        ),

    Speaker('Yozo Hida',
        'UC Berkeley',
        'http://www.cs.berkeley.edu/~yozo/',
        '',
        'invited',
        'Moving Lapack and ScaLapack to Higher Precision without Too Much Work',
        """I will be discussing recent developments in Lapack and ScaLapack
libraries, along with some recent work on incorporating higher
precision into Lapack and ScaLapack."""),

    Speaker('Samee Khan',
        'University of Texas, Arlington',
        '',
        '[email protected]',
        'contributed',
        'Game Theoretical Solutions for Data Replication in Distributed Computing Systems',
        """
Data replication is an essential technique employed to reduce the user
perceived access time in distributed computing systems. One can find numerous
algorithms that address the data replication problem (DRP), each contributing in
its own way. These range from the traditional mathematical optimization
techniques, such as linear programming, dynamic programming, etc., to the
biologically inspired meta-heuristics. We aim to introduce game theory as a new
oracle to tackle the data replication problem. The beauty of game theory
lies in its flexibility and distributed architecture, which is well-suited to
address the DRP. We will specifically use action theory (a special branch of
game theory) to identify techniques that will effectively and efficiently solve
the DRP. Game theory and its necessary properties are briefly introduced,
followed by a thorough and detailed mapping of the possible game theoretical
techniques and DRP. As an example, we derive a game theoretical algorithm for
the DRP, and propose several extensions of it. An elaborate experimental setup
is also detailed, where the derived algorithm is comprehensively evaluated
against three conventional techniques: branch and bound, greedy and genetic
algorithms.
"""),

    Speaker('Ilias Kotsireas',
        'Laurier University, Canada',
        '',
        '[email protected]',
        'contributed',
        'Combinatorial Designs: constructions, algorithms and new results',
        """
We plan to describe recent progress in the search for combinatorial designs of
high order. This progress has been achieved via some algorithmic concepts, such
as the periodic autocorrelation function, the discrete Fourier transform and
the power spectral density criterion, in conjunction with heuristic
observations on plausible patterns for the locations of zero elements. The
discovery of such patterns is done using meta-programming and automatic code
generation (and perhaps very soon data mining algorithms) and reveals the
remarkable phenomenon of crystallization, which does not yet possess a
satisfactory explanation. The resulting algorithms are amenable to parallelism
and we have implemented them on supercomputers, typically as implicit parallel
algorithms.
"""),

    Speaker('Anton Leykin',
        'IMA (Minnesota)',
        '',
        '[email protected]',
        'contributed',
        'Parallel computation of Grobner bases in the Weyl algebra',
        """
The usual machinery of Grobner bases can be applied to non-commutative algebras
of the so-called solvable type. One of them, the Weyl algebra, plays the
central role in the computations with $D$-modules. The practical complexity of
the Grobner bases computation in the Weyl algebra is much higher than in the
(commutative) polynomial rings, therefore calling naturally for parallel
computation. We have developed an algorithm to perform such computation
employing the master-slave paradigm. Our implementation, which has been carried
out in C++ using MPI, draws ideas from both Buchberger's algorithm and
Faugere's $F_4$. It exhibits better speedups for the Weyl algebra in
comparison to polynomial problems of similar size.
"""),

    Speaker('Jason Martin',
        'James Madison University',
        'http://www.math.jmu.edu/~martin/',
        '',
        'invited',
        'MPMPLAPACK: The Massively Parallel Multi-Precision Linear Algebra Package',
        """
For several decades, researchers in the applied fields have had access
to powerful linear algebra packages designed to run on massively
parallel systems. Libraries such as ScaLAPACK and PLAPACK provide a
rich set of functions (usually based on BLAS) for performing linear
algebra over single or double precision real or complex data.
However, such libraries are of limited use to researchers in discrete
mathematics who often need to compute with multi-precision data types.

This talk will cover a massively parallel multi-precision linear
algebra package that I am attempting to write. The goal of this C/MPI
library is to provide drop-in parallel functionality to existing
number theory and algebraic geometry programs (such as Pari, Sage, and
Macaulay2) while preserving enough flexibility to eventually become a
full multi-precision version of PLAPACK. I will describe some
architectural assumptions, design decisions, and benchmarks made so
far, and actively solicit input from the audience (I'll buy coffee for
the person who suggests the best alternative to the current name).
"""),

    Speaker('Marc Moreno Maza',
        'Western Ontario',
        'http://www.csd.uwo.ca/~moreno/',
        '',
        'invited',
        'Component-level Parallelization of Triangular Decompositions',
        """
We discuss the parallelization of algorithms for solving polynomial systems symbolically by way of triangular decompositions. We introduce a component-level parallelism for which the number of processors in use depends on the geometry of the solution set of the input system. Our long term goal is to achieve an efficient multi-level parallelism: coarse grained (component) level for tasks computing geometric objects in the solution sets, and medium/fine grained level for polynomial arithmetic such as GCD/resultant computation within each task.

Component-level parallelism belongs to the class of dynamic irregular parallel applications, which leads us to address the following questions: How to discover and use geometrical information, at an early stage of the solving process, that would be favorable to component-level parallel execution and load balancing? How to use this level of parallel execution to effectively eliminate unnecessary computations? What implementation mechanisms are feasible?

We report on the effectiveness of the approaches that we have applied, including ``modular methods'', ``solving by decreasing order of dimension'', ``task cost estimation for guided scheduling''. We have realized a preliminary implementation on an SMP using multiprocessed parallelism in Aldor and shared memory segments for data communication. Our experimentation shows promising speedups for some well-known problems. We expect that this speedup would add a multiplicative factor on top of the speedup of medium/fine grained level parallelization, such as parallel GCD/resultant computations.
"""),

    Speaker('Alfred Noel',
        'UMass Boston / MIT',
        'http://www.math.umb.edu/~anoel/',
        '',
        'invited',
        'Structure and Representations of Real Reductive Lie Groups: A Computational Approach',
        """
I work with David Vogan (MIT) on the Atlas of Lie Groups and Representations. This is a project to make available information about representations of semi-simple Lie groups over real and p-adic fields. Of particular importance is the problem of the unitary dual: classifying all of the irreducible unitary representations of a given Lie group.

I will present some of the main ideas behind the current and very preliminary version of the software. I will provide some examples also. Currently, we are developing sequential algorithms that are implemented in C++. However, because of time and space complexity we are slowly moving in the direction of parallel computation. For example, David Vogan is experimenting with multi-threads in the K-L polynomials computation module.

This talk is in memory of Fokko du Cloux, the French mathematician who, until a few months ago, was the lead developer. He died this past November.
"""),

    Speaker('Clement Pernet',
        'University of Waterloo',
        '',
        '[email protected]',
        'invited',
        'Parallelism perspectives for the LinBox library',
        """
LinBox is a generic library for efficient linear algebra with blackbox
or dense matrices over a finite field or Z. We first present a few
notions of the sequential implementations of selected problems, such
as the system resolution or multiple triangular system resolution, or
the Chinese remaindering algorithm. Then we expose perspectives for
incorporating parallelism in LinBox, including multi-prime lifting for
system resolution over Q, or parallel Chinese remaindering. The latter
raises the difficult problem of combining early termination
and work-stealing techniques.
"""),

    Speaker('Yi Qiang',
        'University of Washington',
        'http://www.yiqiang.net/',
        '',
        'invited',
        'Distributed Computing using SAGE',
        """
Distributed SAGE (DSAGE) is a distributed computing framework for
SAGE, which allows users to easily parallelize computations and
interact with them in a fluid and natural way. This talk will be
focused on the design and implementation of the distributed computing
framework in SAGE. I will describe the application of the
distributed computing framework to several problems, including the
problem of integer factorization and distributed ray tracing.
Demonstrations of using Distributed SAGE to tackle both problems will
be given, plus information on how to parallelize your own problems. I
will also talk about design issues and considerations that have been
resolved or are yet unresolved in implementing Distributed SAGE.
"""),

    Speaker('Jean-Louis Roch',
        'ID-IMAG (France)',
        'http://www-id.imag.fr/Laboratoire/Membres/Roch_Jean-Louis/perso.html',
        '[email protected]',
        'invited',
        'Processor oblivious parallel algorithms with provable performances: applications',
        """
Based on a work-stealing schedule, the on-line coupling of two algorithms
(one sequential; the other one recursive parallel and fine grain) enables
the design of programs that scale with provable performances on various
parallel architectures, from multi-core machines to heterogeneous grids,
including processors with changing speeds. After presenting a generic scheme
and framework, on top of the middleware KAAPI/Athapascan that efficiently
supports work-stealing, we present practical applications such as: prefix
computation, real time 3D-reconstruction, Chinese remainder modular lifting
with early termination, and data compression.
"""),

    Speaker('Vladimir Tonchev',
        'Michigan Tech',
        '',
        '[email protected]',
        'contributed',
        'Combinatorial designs and code synchronization',
        """
Difference systems of sets are combinatorial designs that arise in connection
with code synchronization. Algebraic constructions based on cyclic difference
sets and finite geometry, and algorithms for finding optimal difference systems
of sets, are discussed.
"""),

    Speaker('Jan Verschelde',
        'UIC',
        'http://www.math.uic.edu/~jan/',
        '[email protected]',
        'invited',
        'Parallel Homotopy Algorithms to Solve Polynomial Systems',
        """
A homotopy is a family of polynomial systems which defines a deformation
from a system with known solutions to a system whose solutions are needed.
Via dynamic load balancing we may distribute the solution paths so that a
close to optimal speedup is achieved. Polynomial systems -- such as the
9-point problem in mechanical design leading to 286,720 paths -- whose
solving required real supercomputers twenty years ago can now be handled
by modest personal cluster computers, and soon by multicore multiprocessor
workstations. Larger polynomial systems, however, may lead to more
numerical difficulties which may skew the timing results, so that
attention must be given to ``quality up'' as well. Modern homotopy methods
consist of sequences of different families of polynomial systems so that
not only the solution paths but also parametric polynomial systems must be
exchanged frequently.
"""),

    Speaker('Thomas Wolf and Winfried Neun',
        '',
        '',
        '[email protected] [email protected]',
        'contributed',
        'Parallel sparsening and simplification of systems of equations',
        """
In a Groebner Basis computation the guiding principle for pairing and
`reducing' equations is a total ordering of monomials or of derivatives for
differential Groebner Bases. If reduction based on an ordering is replaced by
reduction to minimize the number of terms of an equation through another
equation, then on the downside the resulting (shorter) system does depend on the
order of pairing of equations for shortening, but on the upside there are a number
of advantages that make this procedure a perfect addition/companion to the
Groebner Basis computation. Such features are:

* In contrast to Groebner Basis computations, this algorithm is safe in the sense that it does not need any significant amount of memory, not even temporarily.
* It is self-enforcing, i.e. the shorter equations become, the more useful for shortening other equations they potentially get.
* Equations in a sparse system are less coupled and a cost-effective elimination strategy (ordering) is much easier to spot (for humans and computers) than for a dense system.
* Statistical tests show that the probability of random polynomials to factorize increases drastically the fewer terms a polynomial has.
* By experience, the shortening of partial differential equations increases their chance to become ordinary differential equations which are usually easier to solve explicitly.
* The likelihood that shortenings are possible is especially high for large overdetermined systems. This is because the number of pairings grows quadratically with the number of equations, but for overdetermined systems more equations do not automatically mean more unknowns which potentially obstruct shortening by introducing terms that cannot cancel.
* The algorithm offers a fine grain parallelization in the computation to shorten one equation with another one and a coarse grain parallelization in that any pair of two equations of a larger system can be processed in parallel. In the talk we will present the algorithm, show examples supporting the above statements and give a short demo.
"""),

    Speaker('Kathy Yelick',
        'UC Berkeley',
        'http://www.cs.berkeley.edu/~yelick/',
        '[email protected]',
        'invited',
        'Programming Models for Parallel Computing',
        """
The introduction of multicore processors into mainstream computing is
creating a revolution in software development. While Moore's
Law continues to hold, most of the increases in transistor density will be
used for explicit, software-visible parallelism, rather than increasing
clock rate. The major open question is how these machines will be
programmed.
In this talk I will give an overview of some of the hardware trends, and
describe programming techniques using Partitioned Global Address Space (PGAS)
languages. PGAS languages have emerged as a viable alternative to message
passing programming models for large-scale parallel machines and clusters.
They also offer an alternative to shared memory programming models (such as
threads and OpenMP) and the possibility of a single programming model that
will work well across a wide range of shared and distributed memory
platforms.
PGAS languages provide a shared memory abstraction with support for locality
through the use of distributed data types. The three most mature PGAS
languages (UPC, CAF and Titanium) offer a statically partitioned global
address space with a static SPMD control model, while languages emerging
from the DARPA HPCS program are more dynamic. I will describe these
languages as well as our experience using them in both numeric and
symbolic applications.""")

]
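# Note: the abstracts pages rely on this list being kept in alphabetical order
# by last name; none of the generators below re-sorts it (Speaker.__cmp__ would
# support sorted(speakers) if that were ever needed).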

def find(name):
    name = name.lower()
    ans = None
    for v in speakers:
        if name in v.name.lower():
            if ans is not None:
                raise RuntimeError, "ambiguous search for '%s'"%name
            ans = v
    if ans is None:
        raise RuntimeError, "Speaker '%s' not found"%name
    return ans
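# Example lookups (matching is a case-insensitive substring test on the name):
#   find('bailey')   # -> the David Bailey record
#   find('cohn')     # -> the Henry Cohn record
#   find('an')       # -> raises RuntimeError: several names contain 'an'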


##############

def html_talks(file='a'):
    a = open('%s.html'%file,'w')
    a.write(r"""
<html>
<head>
<title>
Interactive Parallel Computation in Support of Research
in Algebra, Geometry and Number Theory: Abstracts
</title>

<style>
div.box {
border:1px solid #004400;
padding:10px;
margin-left:30px;
margin-right:30px;
}
table {
border-bottom:1px solid lightgray;
border-top:1px solid lightgray;
}

a:active { color: #ff0000; }
a:hover { background-color: #aaffaa}
a { text-decoration: none; }

div.space {
padding:50px;
margin-top:15px;
background-color:#eeeeee;
}


h2.top {
text-align:center;
}

div.bar {
padding:1px;
background-color:#999999;
border-top: 1px solid black;
border-bottom: 1px solid black;
margin:2px;
}

</style>

<body>
<h1 align=center>Titles and Abstracts</h1>

These are the abstracts for all the talks scheduled for the <a href="index.html">MSRI Parallel Computation
workshop</a>, listed in
alphabetical order. For times, see <a href="schedule.html">the schedule</a> itself.
<br>
<hr>
""")
    for s in speakers:
        a.write(s.html_full())
        a.write('<br><br><hr>')
    a.write('</body></html>')
    a.close()

def wiki_talks(file='a'):
    a = open('%s.txt'%file,'w')
    a.write(r"""
= Titles and Abstracts =
These are the abstracts for all the talks scheduled for [:msri07: the workshop], listed in
alphabetical order. For times, see the [:msri07/schedule: schedule] itself.

[[TableOfContents]]

""")

    for s in speakers:
        a.write(s.wiki_full())
        a.write('\n\n')
    a.close()

def pdf_abstracts(file='a', verbose=False):
    a = open('%s.tex'%file,'w')
    a.write(r"""
\documentclass{article}
\usepackage{url}
\usepackage{fullpage}
\title{Titles and Abstracts:\vspace{4ex}\mbox{}\\
\Large Interactive Parallel Computation in Support of Research in\\Algebra, Geometry
and Number Theory\vspace{4ex}\mbox{}\\
\large A Workshop at MSRI Jan 29-Feb 2 organized by\\Burhanuddin, Demmel, Goins, Kaltofen, Perez, Stein, Verrill, and Weening}
\begin{document}
\maketitle
\par\noindent
""")
    for s in speakers:
        a.write(s.latex_full())
        a.write('\\mbox{}\\vspace{6ex}\n\n\n\\par\\noindent')
    a.write('\\end{document}')
    a.close()
    if not verbose:
        z = '1>/dev/null'
    else:
        z = ''
    os.system('pdflatex %s.tex < /dev/null %s'%(file, z))
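# Example regeneration of just the abstracts (illustrative file names):
#   html_talks(file='abstracts')     # writes abstracts.html
#   wiki_talks(file='abstracts')     # writes abstracts.txt (MoinMoin markup)
#   pdf_abstracts(file='abstracts')  # writes abstracts.tex and runs pdflatex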

########################################################################################

class Day:
    def __init__(self, name, theme='', discussion='', invited=[], contributed=[]):
        self.name = name
        self.theme = theme
        self.discussion = discussion
        self.invited = invited
        self.contributed = contributed

    def __repr__(self):
        return self.name

    def html_contrib(self, n):
        c = self.contributed
        if n < len(c):
            if 'Cohn' in c[n].name:
                return c[n].html_short() + ' (part %s)'%n
            else:
                return '(Optional) ' + c[n].html_short()
        else:
            return 'Break'

    def latex_contrib(self, n):
        c = self.contributed
        if n < len(c):
            if 'Cohn' in c[n].name:
                return c[n].latex_short() + ' (part %s)'%n
            else:
                return '(Optional) '+ c[n].latex_short()
        else:
            return 'Break'

    def html(self):
        s = """
<a name="%s">
<table class="ws" width=90%% align=center>
<tr><td class='time' width=10%%></td><td class="day" width=80%%><font size=+3><b>%s</b><br><font size=+2><b>Theme: %s</b></font></td></tr>
<tr><td class='time'>9:00-10:00</td><td>%s</td></tr>
<tr><td class='time'>10:00-10:30</td><td class="break">Tea Break</td></tr>
<tr><td class='time'>10:30-11:30</td><td>%s</td></tr>
<tr><td class='time'>11:30-12:30</td><td>%s</td></tr>
<tr><td class='time'>12:30-1:30</td><td class="break">Lunch</td></tr>
<tr><td class='time'>1:30-2:00</td><td>%s</td></tr>
<tr><td class='time'>2:00-2:30</td><td>%s</td></tr>
<tr><td class='time'>2:30-3:30</td><td class="discuss"><b>Discussion:</b> %s</td></tr>
"""%(
            self.name.split()[Integer(0)].strip(',').lower(),
            self.name, self.theme,
            self.invited[Integer(0)].html_short(),
            self.invited[Integer(1)].html_short(),
            self.invited[Integer(2)].html_short(),
            self.html_contrib(Integer(0)),
            self.html_contrib(Integer(1)),
            self.discussion)
        if self.name != 'Friday, Feb 2':
            s += """
<tr><td class='time'>3:30-4:00</td><td class="break">Tea Break</td></tr>
<tr><td class='time'>4:00-5:30</td><td>Working Sessions</td></tr>
<tr><td class='time'>6:00-8:00</td><td class="break">Dinner</td></tr>
<tr><td class='time'>8:00-10:00</td><td class="break">Coffee Shops...</td></tr>
"""
        s += """
</table>
"""
        return s

    def latex(self):
        s = """
{\\Large \\bf %s}\\vspace{1ex}

\\begin{tabular}{|l|l|}\\hline
 & \\begin{minipage}{0.7\\textwidth}Theme: %s\\end{minipage} \\\\ \\hline
9:00--10:00 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\
10:00--10:30 & \\begin{minipage}{0.7\\textwidth}Tea Break\\end{minipage} \\\\
10:30--11:30 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\
11:30--12:30 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\ \\hline
12:30--1:30 & \\begin{minipage}{0.7\\textwidth}Lunch\\end{minipage} \\\\
1:30--2:00 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\
2:00--2:30 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\ \\hline
2:30--3:30 & \\begin{minipage}{0.7\\textwidth}Discussion: %s\\end{minipage} \\\\
"""%(
            self.name, self.theme,
            self.invited[Integer(0)].latex_short(),
            self.invited[Integer(1)].latex_short(),
            self.invited[Integer(2)].latex_short(),
            self.latex_contrib(Integer(0)),
            self.latex_contrib(Integer(1)),
            self.discussion
            )
        if self.name != 'Friday, Feb 2':
            s += """
3:30--4:00 & \\begin{minipage}{0.7\\textwidth}Tea Break\\end{minipage} \\\\
4:00--5:30 & \\begin{minipage}{0.7\\textwidth}Working Sessions\\end{minipage} \\\\
6:00--8:00 & \\begin{minipage}{0.7\\textwidth}Dinner\\end{minipage} \\\\
8:00--10:00 & \\begin{minipage}{0.7\\textwidth}Coffee Shops...\\end{minipage} \\\\ \\hline
"""
        s += '\\end{tabular}'
        return s
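
# Illustrative sketch (made-up day, not used in the schedule below): a Day
# combines three invited and up to two contributed Speaker records and renders
# one schedule table per back end.
def _example_day():
    d = Day('Someday, Jan 1', 'Example theme', 'Example discussion topic',
            invited=[find('bailey'), find('cohn'), find('hida')],
            contributed=[find('bradshaw')])
    return d.html(), d.latex()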

days = [\
    Day('Monday, Jan 29',
        'What do we want and what can we expect from applying parallel techniques to pure mathematics research tools?',
        """Parallel methods for mathematics software for doing algebra, geometry
and number theory -- What can we expect? What are the right problems to attack first
and get the most for our work?
""",
        [find('pernet'),
         find('granger'),
         find('roch')],
        [find('cohn'), find('cohn')]),

    Day('Tuesday, Jan 30',
        'Algebra',
        'Parallel methods for algebra (commutative algebra, linear algebra, group theory).',
        [find('yelick'), find('hida'), find('noel')],
        [find('Leykin'), find('tonchev')]
        ),

    Day('Wednesday, Jan 31',
        'Number Theory',
        "Parallel methods for number theory.",
        [find('martin'), find('hart'), find('qiang')],
        [find('bradshaw'), find('kotsireas')]
        ),

    Day('Thursday, Feb 1',
        'Geometry',
        'Parallel methods for geometry',
        [find('verschelde'), find('moreno'), find('bailey')],
        [find('wolf'), find('neun')]),

    Day('Friday, Feb 2',
        'Large-Scale Parallel Computation',
        'Wrap-up session',
        [find('harrison'), find('cooperman'), find('edelman')],
        [find('khan')])
]


def html_sched(file='a'):
    a = open('%s.html'%file,'w')
    a.write(r"""
<html>
<head>
<title>
Interactive Parallel Computation in Support of Research
in Algebra, Geometry and Number Theory: Schedule
</title>

<style>
div.box {
border:1px solid #004400;
padding:10px;
margin-left:30px;
margin-right:30px;
}

table.ws {
border-width: 1px 1px 1px 1px;
border-spacing: 0px;
border-style: solid solid solid solid ;
border-color: gray gray gray gray;
border-collapse: separate;
background-color: #333355;
}
table.ws th {
border-width: 1px 1px 1px 1px;
padding: 1px 1px 1px 1px;
border-style: solid solid solid solid ;
border-color: gray gray gray gray;
background-color: white;
-moz-border-radius: 0px 0px 0px 0px;
padding:10px;
}
table.ws td {
border-width: 1px 1px 1px 1px;
padding: 1px 1px 1px 1px;
border-style: solid solid solid solid ;
border-color: gray gray gray gray;
background-color: white;
-moz-border-radius: 0px 0px 0px 0px;
padding:10px;
}
table.ws td.break {
border-width: 1px 1px 1px 1px;
padding: 1px 1px 1px 1px;
border-style: solid solid solid solid ;
border-color: gray gray gray gray;
background-color: #80ff80;
-moz-border-radius: 0px 0px 0px 0px;
padding:10px;
}

table.ws td.day {
border-width: 1px 1px 1px 1px;
padding: 1px 1px 1px 1px;
border-style: solid solid solid solid ;
border-color: gray gray gray gray;
background-color: #e0e0ff;
-moz-border-radius: 0px 0px 0px 0px;
padding:10px;
}

table.ws td.discuss {
border-width: 1px 1px 1px 1px;
padding: 1px 1px 1px 1px;
border-style: solid solid solid solid ;
border-color: gray gray gray gray;
background-color: #e0ffe0;
-moz-border-radius: 0px 0px 0px 0px;
padding:10px;
}

table.ws td.time {
border-width: 1px 1px 1px 1px;
padding: 1px 1px 1px 1px;
border-style: solid solid solid solid ;
border-color: gray gray gray gray;
background-color: #ffffe0;
-moz-border-radius: 0px 0px 0px 0px;
padding:10px;
}

a:active { color: #ff0000; }
a:hover { background-color: #aaffaa}
a { text-decoration: none; }

div.space {
padding:50px;
margin-top:15px;
background-color:#eeeeee;
}


h2.top {
text-align:center;
}

div.bar {
padding:1px;
background-color:#999999;
border-top: 1px solid black;
border-bottom: 1px solid black;
margin:2px;
}

</style>

<body>
<h1 align=center>Schedule and <a href="#abstracts">Abstracts</a></h1>
<h3>
This is the schedule of talks and list of abstracts for this <a href="index.html">MSRI Parallel Computation
workshop</a>. There is also <a href="schedule.pdf">a PDF schedule</a> and
<a href="abstracts.pdf">a PDF list of abstracts</a>.
</h3>
<h3 align=center>
<a href="#monday">Monday</a> | <a href="#tuesday">Tuesday</a> |
<a href="#wednesday">Wednesday</a> |
<a href="#thursday">Thursday</a> |
<a href="#friday">Friday</a>
</h3>
<div class='bar'></div>
""")

    for d in days:
        a.write(d.html())
        a.write("<br><div class='bar'></div><br>")

    a.write('<a name="abstracts"><h1>Abstracts</h1>')
    for s in speakers:
        a.write(s.html_full())
        a.write('<br><br><hr>')

    a.write('</body></html>')

    a.close()


def pdf_sched(file='a', verbose=False):
    a = open('%s.tex'%file,'w')
    a.write(r"""
\documentclass{article}
\usepackage{url}
\usepackage{fullpage}
\title{Schedule: Jan 29 -- Feb 2, 2007\vspace{3ex}\mbox{}\\
\Large MSRI: Interactive Parallel Computation in Support of Research in\\Algebra, Geometry
and Number Theory}
\date{}
\begin{document}
\maketitle
\vspace{-3ex}

\mbox{}\par\noindent
\begin{center}
""".strip())
    for d in days:
        a.write(d.latex())
        a.write('\\mbox{}\\vspace{4ex}\n\n\n\\par\\noindent')
    a.write('\\end{center}')
    a.write('\\end{document}')
    a.close()
    if not verbose:
        z = '1>/dev/null'
    else:
        z = ''
    os.system('pdflatex %s.tex < /dev/null %s'%(file, z))

def gen():
    dir='/home/was/conferences/2007-msri-parallel/'
    pdf_sched(file='schedule', verbose=True)
    pdf_abstracts(file='abstracts', verbose=True)
    html_sched(file='schedule')
    os.system('cp -v *.pdf *.html %s/'%dir)
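
# To rebuild everything, load this file into a Sage session and call gen();
# it assumes pdflatex is on the PATH and that the destination directory
# hard-coded above exists.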