######################################################################1# This file was *autogenerated* from the file sched.sage.2######################################################################3class Speaker:4def __init__(self, name, employer=None, url=None, email=None, status=None, title=None, abstract=None):5self.name = name6self.employer = employer7self.email = email8self.url = url9self.title = title10self.abstract = abstract11self.status = status12if status != 'invited' and status != 'contributed':13raise ValueError, '%s: "%s"'%(name, status)1415def __repr__(self):16return self.name1718def last_name(self):19name = self.name20if ' and ' in self.name:21name = self.name.split(' and ')[Integer(0)]22return ' '.join(name.split()[Integer(1):])2324def first_name(self):25return self.name.split()[Integer(0)]2627def __cmp__(self, right):28return cmp(self.last_name(), right.last_name())2930def tag(self):31return self.name.replace(' ','')3233def html_full(self):34return """35<a name="%s">36<h2><font color="darkred"><b>%s</b></font>:37<font color="#333333"><i>%s</i></font><br></h2>38%s<br><br>39%s"""%(40self.tag(), self.last_name(), self.title, self.html_name(), self.html_abstract())4142def html_abstract(self):43s = self.abstract.replace('\n\n','<p>').replace('``','"').replace("''",'"')44if '*' in s:45i = s.find('*')46s = s.replace('*', '<li>')47s = s[:i] + '<ul>' + s[i:]48s += '</ul>'49return s5051def html_short(self):52return '%s<br><b><i>%s</i></b>'%(self.html_name(), self.html_title())5354def html_name(self):55if self.url:56url = self.url57else:58url = 'mailto:%s'%self.email59if self.employer:60emp = ' - %s'%self.employer61else:62emp = ''63return '<a href="%s">%s %s</a>'%(url, self.name, emp)6465def html_title(self):66return '<a href="#%s">%s</a>'%(self.tag(), self.title)676869############################################################################707172def wiki_full(self):73return """== %s: %s ==7475[%s %s %s]7677%s78"""%(self.last_name(), self.title, self.wiki_url(), self.name, self.wiki_employer(), self.wiki_abstract())7980def wiki_employer(self):81if self.employer == '':82return ''83else:84return '- %s'%self.employer8586def wiki_abstract(self):87ab = self.abstract.replace('``','"').replace("''",'"')88if '*' in ab:89return ab.strip()90return misc.word_wrap(ab).strip()9192def wiki_short(self):93return '%s<br>%s'%(self.wiki_name(), self.wiki_title())9495def wiki_url(self):96if self.url:97url = self.url98else:99url = 'mailto:%s'%self.email100return url101102103############################################################################104105def latex_full(self):106s = """107{\\large \\bf %s: {\\em\sf %s}}\\vspace{1ex}\\newline108{\em %s}\\vspace{1ex}\\newline109\url{%s}\\vspace{1ex}\\newline110{%s}111"""%(112self.last_name(), self.title, self.latex_name(), self.latex_url(), self.latex_abstract())113return s.strip()114115def latex_abstract(self):116s = self.abstract117if '*' in s:118i = s.find('*')119s = s.replace('*', '\\item')120s = s[:i] + '\\begin{itemize}' + s[i:]121s += '\\end{itemize}'122return s123124def latex_short(self):125return '%s - {\\em\\sf %s}'%(self.last_name(), self.title)126127def latex_name(self):128if self.employer:129emp = ' - %s'%self.employer130else:131emp = ''132return '%s %s'%(self.name, emp)133134def latex_url(self):135if self.url:136return self.url137return self.email138139140141speakers = [\142Speaker('David Bailey',143'Lawrence Berkeley Labs (LBL)',144'http://crd.lbl.gov/~dhbailey/',145'',146'invited',147'Experimental Mathematics and High-Performance Computing',148"""149Recent developments in ``experimental mathematics'' have underscored the value of high-performance computing in modern mathematical research. The most frequent computations that arise here are high-precision (typically several-hundred-digit accuracy) evaluations of integrals and series, together with integer relation detections using the ``PSLQ'' algorithm. Some recent highlights in this arena include: (2) the discovery of ``BBP'-type formulas for various mathematical constants, including pi and log(2); (3) the discovery of analytic evaluations for several classes of multivariate zeta sums; (4) the discovery of Apery-like formulas for the Riemann zeta function at integer arguments; and (5) the discovery of analytic evaluations and linear relations among certain classes of definite integrals that arise in mathematical physics. The talk will include a live demo of the ``experimental mathematician's toolkit''.150"""),151Speaker('Robert Bradshaw',152'University of Washington',153'',154'[email protected]',155'contributed',156'Loosely Dependent Parallel Processes',157"""158Many parallel computational algorithms involve dividing the problem into several smaller tasks and running each task in isolation in parallel. Often these tasks are the same procedure over a set of varying parameters. Inter-process communication might not be needed, but the results of one task may influence what subsequent tasks need to be performed. I will discuss the concept of job generators, or custom-written tasks that generate other tasks and process their feedback. I would discuss this specifically in the context of integer factorization.159"""),160161Speaker('Henry Cohn',162'Microsoft Research',163'http://research.microsoft.com/~cohn/',164'',165'invited',166'Parallel Computation Tools for Research: A Wishlist',167''),168169Speaker('Gene Cooperman',170'Northeastern University',171'http://www.ccs.neu.edu/home/gene/',172'',173'invited',174'Disk-Based Parallel Computing: A New Paradigm',175"""176One observes that 100 local commodity disks of an array have approximately the same streaming bandwidth as a single RAM subsystem. Hence, it is proposed to treat a cluster as if it were a single computer with tens of terabytes of data, and with RAM serving as cache for disk. This makes feasible the solution of truly large problems that are currently space-limited. We also briefly summarize other recent activities of our working group: lessons from supporting ParGAP and ParGCL; progress toward showing that 20 moves suffice to solve Rubik's cube; lessons about marshalling from support of ParGeant4 (parallelization of a million-line program at CERN); and experiences at the SCIEnce workshop (symbolic-computing.org), part of a 5-year, 3.2 million euro, European Union project. Our new distributed checkpointing package now provides a distributed analog of a SAVE-WORKSPACE command, for use in component-based symbolic software, such as SAGE."""),177178Speaker('Alan Edelman',179'MIT',180'http://www-math.mit.edu/~edelman/',181'',182'invited',183'Interactive Parallel Supercomputing: Today: MATLAB(r) and Python coming Cutting Edge: Symbolic Parallelism with Mathematica(r) and MAPLE(r)',184"""Star-P is a unique technology offered by Interactive Supercomputing after185nurturing at MIT. Star-P through its abstractions is solving the ease of use186problem that has plagued supercomputing. Some of the innovative features of187Star-P are the ability to program in MATLAB, hook in task parallel codes188written using a processor free abstraction, hook in existing parallel codes,189and obtain the performance that represents the HPC promise. All this is190through a client/server interface. Other clients such as Python or R could191be possible. The MATLAB, Python, or R becomes the "browser." Parallel192computing remains challenging, compared to serial coding but it is now that193much easier compared to solutions such as MPI. Users of MPI can plug in194their previously written codes and libraries and continue forward in Star-P.195196Numerical computing is challenging enough in a parallel environment,197symbolic computing will require even more research and more challenging198problems to be solved. In this talk we will demonstrate the possibilities199and the pitfalls.200"""),201202Speaker('Brian Granger',203'Tech X Corp.',204'http://txcorp.com',205'',206'invited',207'Interactive Parallel Computing using Python and IPython',208"""209Interactive computing environments, such as Matlab, IDL and210Mathematica are popular among researchers because their211interactive nature is well matched to the exploratory nature of212research. However, these systems have one critical weakness:213they are not designed to take advantage of parallel computing214hardware such as multi-core CPUs, clusters and supercomputers.215Thus, researchers usually turn to non-interactive compiled216languages, such as C/C++/Fortran when parallelism is needed.217218In this talk I will describe recent work on the IPython project219to implement a software architecture that allows parallel220applications to be developed, debugged, tested, executed and221monitored in a fully interactive manner using the Python222programming language. This system is fully functional and allows223many types of parallelism to be expressed, including message224passing (using MPI), task farming, shared memory, and custom user225defined approaches. I will describe the architecture, provide an226overview of its basic usage and then provide more sophisticated227examples of how it can be used in the development of new parallel228algorithms. Because IPython is one of the components of the SAGE229system, I will also discuss how IPython's parallel computing230capabilities can be used in that context.231"""),232233Speaker('Robert Harrison',234'Oak Ridge National Lab',235'http://www.csm.ornl.gov/ccsg/html/staff/harrison.html',236'',237'invited',238'Science at the petascale: tools in the tool box',239"""240Petascale computing will require coordinating the actions of 100,000+241processors, and directing the flow of data between up to six levels242of memory hierarchy and along channels that differ by over a factor of243100 in bandwidth. Amdahl's law requires that petascale applications244have less than 0.001% sequential or replicated work in order to245be at least 50% efficient. These are profound challenges for all but246the most regular or embarrassingly parallel applications, yet we also247demand that not just bigger and better, but fundamentally new science.248In this presentation I will discuss how we are attempting to confront249simultaneously the complexities of petascale computation while250increasing our scientific productivity. I hope that I can convince you251that our development of MADNESS (multiresolution adaptive numerical252scientific simulation) is not as crazy as it sounds.253254This work is funded by the U.S. Department of Energy, the division of255Basic Energy Science, Office of Science, and was performed in part256using resources of the National Center for Computational Sciences, both257under contract DE-AC05-00OR22725 with Oak Ridge National Laboratory.258"""),259260Speaker('Bill Hart',261'Warwick',262'http://www.maths.warwick.ac.uk/~masfaw/',263'',264'invited',265'Parallel Computation in Number Theory',266"""267This talk will have two sections. The first will268introduce a new library for number theory which is269under development, called FLINT. I will discuss the270various algorithms already available in FLINT, compare271them with similar implementations available elsewhere,272and speak about what the future holds for FLINT, with273the focus on parallel processing and integration into274Pari and the SAGE package.275276The second part of the talk will focus on low level277implementation details of parallel algorithms in278number theory. In particular I will discuss the design279decisions that we have made so far in the FLINT280library to facilitate multicore and multiprocessor281platforms.282283If time permits, there will be a live demonstration.284"""285),286287Speaker('Yozo Hida',288'UC Berkeley',289'http://www.cs.berkeley.edu/~yozo/',290'',291'invited',292'Moving Lapack and ScaLapack to Higher Precision without Too Much Work',293"""I will be discussing recent developments in Lapack and ScaLapack294libraries, along with some recent work on incorporating higher295precision into Lapack and ScaLapack."""),296297Speaker('Samee Khan',298'University of Texas, Arlington',299'',300'[email protected]',301'contributed',302'Game Theoretical Solutions for Data Replication in Distributed Computing Systems',303"""304Data replication is an essential technique employed to reduce the user305perceived access time in distributed computing systems. One can find numerous306algorithms that address the data replication problem (DRP) each contributing in307its own way. These range from the traditional mathematical optimization308techniques, such as, linear programming, dynamic programming, etc. to the309biologically inspired meta-heuristics. We aim to introduce game theory as a new310oracle to tackle the data replication problem. The beauty of the game theory311lies in its flexibility and distributed architecture, which is well-suited to312address the DRP. We will specifically use action theory (a special branch of313game theory) to identify techniques that will effectively and efficiently solve314the DRP. Game theory and its necessary properties are briefly introduced,315followed by a through and detailed mapping of the possible game theoretical316techniques and DRP. As an example, we derive a game theoretical algorithm for317the DRP, and propose several extensions of it. An elaborate experimental setup318is also detailed, where the derived algorithm is comprehensively evaluated319against three conventional techniques, branch and bound, greedy and genetic320algorithms.321"""),322323Speaker('Ilias Kotsireas',324'Laurier University, Canada',325'',326'[email protected]',327'contributed',328'Combinatorial Designs: constructions, algorithms and new results',329"""330We plan to describe recent progress in the search for combinatorial designs of331high order. This progress has been achieved via some algorithmic concepts, such332as the periodic autocorrelation function, the discrete Fourier transform and333the power spectral density criterion, in conjunction with heuristic334observations on plausible patterns for the locations of zero elements. The335discovery of such patterns is done using meta-programming and automatic code336generation (and perhaps very soon data mining algorithms) and reveals the337remarkable phenomenon of crystalization, which does not yet possess a338satisfactory explanation. The resulting algorithms are amenable to parallelism339and we have implemented them on supercomputers, typically as implicit parallel340algorithms.341"""),342343Speaker('Anton Leykin',344'IMA (Minessota)',345'',346'leyki[email protected]',347'contributed',348'Parallel computation of Grobner bases in the Weyl algebra',349"""350The usual machinery of Grobner bases can be applied to non-commutative algebras351of the so-called solvable type. One of them, the Weyl algebra, plays the352central role in the computations with $D$-modules. The practical complexity of353the Grobner bases computation in the Weyl algebra is much higher than in the354(commutative) polynomial rings, therefore, calling naturally for parallel355computation. We have developed an algorithm to perform such computation356employing the master-slave paradigm. Our implementation, which has been carried357out in C++ using MPI, draws ideas from both Buchberger algorithm and358Faugere's $F_4$. It exhibits better speedups for the Weyl algebra in359comparison to polynomial problems of the similar size.360"""),361362Speaker('Jason Martin',363'James Madison University',364'http://www.math.jmu.edu/~martin/',365'',366'invited',367'MPMPLAPACK: The Massively Parallel Multi-Precision Linear Algebra Package',368"""369For several decades, researchers in the applied fields have had access370to powerful linear algebra packages designed to run on massively371parallel systems. Libraries such as ScaLAPACK and PLAPACK provide a372rich set of functions (usually based on BLAS) for performing linear373algebra over single or double precision real or complex data.374However, such libraries are of limited use to researchers in discrete375mathematics who often need to compute with multi-precision data types.376377This talk will cover a massively parallel multi-precision linear378algebra package that I am attempting to write. The goal of this C/MPI379library is to provide drop-in parallel functionality to existing380number theory and algebraic geometry programs (such as Pari, Sage, and381Macaulay2) while preserving enough flexibility to eventually become a382full multi-precision version of PLAPACK. I will describe some383architectural assumptions, design descisions, and benchmarks made so384far and actively solicit input from the audience (I'll buy coffee for385the person who suggests the best alternative to the current name).386"""),387388Speaker('Marc Moreno Maza',389'Western Ontario',390'http://www.csd.uwo.ca/~moreno/',391'',392'invited',393'Component-level Parallelization of Triangular Decompositions',394"""395We discuss the parallelization of algorithms for solving polynomial systems symbolically by way of triangular decompositions. We introduce a component-level parallelism for which the number of processors in use depends on the geometry of the solution set of the input system. Our long term goal is to achieve an efficient multi-level parallelism: coarse grained (component) level for tasks computing geometric objects in the solution sets, and medium/fine grained level for polynomial arithmetic such as GCD/resultant computation within each task.396397Component-level parallelism belongs to the class of dynamic irregular parallel applications, which leads us to address the following questions: How to discover and use geometrical information, at an early stage of the solving process, that would be favorable to component-level parallel execution and load balancing? How to use this level of parallel execution to effectively eliminate unnecessary computations? What implementation mechanisms are feasible?398399We report on the effectiveness of the approaches that we have applied, including ``modular methods'', ``solving by decreasing order of dimension'', ``task cost estimation for guided scheduling''. We have realized a preliminary implementation on a SMP using multiprocessed parallelism in Aldor and shared memory segments for data communication. Our experimentation shows promising speedups for some well-know problems. We expect that this speedup would add a multiple factor to the speedup of medium/fine grained level parallelization as parallel GCD/resultant computations.400"""),401402Speaker('Alfred Noel',403'UMass Boston / MIT',404'http://www.math.umb.edu/~anoel/',405'',406'invited',407'Structure and Representations of Real Reductive Lie Groups: A Computational Approach',408"""409I work with David Vogan (MIT) on the Atlas of Lie Groups and Representations. This is a project to make available information about representations of semi-simple Lie groups over real and p-adic fields. Of particular importance is the problem of the unitary dual: classifying all of the irreducible unitary representations of a given Lie group.410411I will present some of the main ideas behind the current and very preliminary version of the software. I will provide some examples also. Currently, we are developing sequential algorithms that are implemented in C++. However, because of time and space complexity we are slowly moving in the direction of parallel computation. For example, David Vogan is experimenting with multi-threads in the K-L polynomials computation module.412413This talk is in memory of Fokko du Cloux, the French mathematician who, until a few months ago, was the lead developer. He died this past November.414"""),415416Speaker('Clement Pernet',417'University of Waterloo',418'',419'[email protected]',420'invited',421'Parallelism perspectives for the LinBox library',422"""423LinBox is a generic library for efficient linear algebra with blackbox424or dense matrices over a finite field or Z. We first present a few425notions of the sequential implementations of selected problems, such426as the system resolution or multiple triangular system resolution, or427the chinese remaindering algorithm. Then we expose perspectives for428incorporating parallelism in LinBox, including multi-prime lifting for429system resolution over Q, or parallel chinese remaindering. This last430problem raises the difficult problem of combining early termination431and work-stealing techniques.432"""),433434Speaker('Yi Qiang',435'University of Washington',436'',437'http://www.yiqiang.net/',438'invited',439'Distributed Computing using SAGE',440"""441Distributed SAGE (DSAGE) is a distributed computing framework for442SAGE which allows users to easily parallelize computations and443interact with them in a fluid and natural way. This talk will be444focused on the design and implementation of the distributed computing445framework in SAGE. I will describe the application of the446distributed computing framework to several problems, including the447problem of integer factorization and distributed ray tracing.448Demonstrations of using Distributed SAGE to tackle both problems will449be given plus information on how to parallelize your own problems. I450will also talk about design issues and considerations that have been451resolved or are yet unresolved in implementing Distributed SAGE.452"""),453454Speaker('Jean-Louis Roch',455'ID-IMAG (France)',456'http://www-id.imag.fr/Laboratoire/Membres/Roch_Jean-Louis/perso.html',457'[email protected]',458'invited',459'Processor oblivious parallel algorithms with provable performances: applications',460"""461Based on a work-stealing schedule, the on-line coupling of two algorithms462(one sequential; the other one recursive parallel and fine grain) enables463the design of programs that scale with provable performances on various464parallel architectures, from multi-core machines to heterogeneous grids,465including processors with changing speeds. After presenting a generic scheme466and framework, on top of the middleware KAAPI/Athapascan that efficiently467supports work-stealing, we present practical applications such as: prefix468computation, real time 3D-reconstruction, Chinese remainder modular lifting469with early termination, data compression.470"""),471472Speaker('Vladimir Tonchev',473'Michigan Tech',474'',475'[email protected]',476'contributed',477'Combinatorial designs and code synchronization',478"""479Difference systems of sets are combinatorial designs that arise in connection480with code synchronization. Algebraic constructions based on cyclic difference481sets and finite geometry and algorithms for finding optimal difference systems482of sets are discussed.483"""),484485Speaker('Jan Verschelde',486'UIC',487'http://www.math.uic.edu/~jan/',488'[email protected]',489'invited',490'Parallel Homotopy Algorithms to Solve Polynomial Systems',491"""492A homotopy is a family of polynomial systems which defines a deformation493from a system with known solutions to a system whose solutions are needed.494Via dynamic load balancing we may distribute the solution paths so that a495close to optimal speed up is achieved. Polynomial systems -- such as the4969-point problem in mechanical design leading to 286,720 paths -- whose497solving required real supercomputers twenty years ago can now be handled498by modest personal cluster computers, and soon by multicore multiprocessor499workstations. Larger polynomial systems however may lead to more500numerical difficulties which may skew the timing results, so that501attention must be given to ``quality up'' as well. Modern homotopy methods502consist of sequences of different families of polynomial systems so that503not only the solution paths but also parametric polynomial systems must be504exchanged frequently.505"""),506507Speaker('Thomas Wolf and Winfried Neun',508'',509'',510'[email protected] [email protected]',511'contributed',512'Parallel sparsening and simplification of systems of equations',513"""514In a Groebner Basis computation the guiding principle for pairing and515`reducing' equations is a total ordering of monomials or of derivatives for516differential Groebner Bases. If reduction based on an ordering is replaced by517reduction to minimize the number of terms of an equation through another518equation then on the downside the resulting (shorter) system does depend on the519order of pairing of equations for shortening but on the upside there are number520of advantages that makes this procedure a perfect addition/companion to the521Groebner Basis computation. Such features are:522523* In contrast to Groebner Basis computations, this algorithm is safe in the sense that it does not need any significant amount of memory, even not temporarily.524* It is self-enforcing, i.e. the shorter equations become, the more useful for shortening other equations they potentially get.525* Equations in a sparse system are less coupled and a cost effective elimination strategy (ordering) is much easier to spot (for humans and computers) than for a dense system.526* Statistical tests show that the probability of random polynomials to factorize increases drastically the fewer terms a polynomial has.527* By experience the shortening of partial differential equations increases their chance to become ordinary differential equations which are usually easier to solve explicitly.528* The likelihood of shortenings to be possible is especially high for large overdetermined systems. This is because the number of pairings goes quadratically with the number of equations but for overdetermined systems, more equations does not automatically mean more unknowns to occur which potentially obstruct shortening by introducing terms that can not cancel.529* The algorithm offers a fine grain parallelization in the computation to shorten one equation with another one and a coarse grain parallelization in that any pair of two equations of a larger system can be processed in parallel. In the talk we will present the algorithm, show examples supporting the above statements and give a short demo.530"""),531532Speaker('Kathy Yelick',533'UC Berkeley',534'http://www.cs.berkeley.edu/~yelick/',535'[email protected]',536'invited',537'Programming Models for Parallel Computing',538"""539The introduction of multicore processors into mainstream computing is540creating a revolution in software development. While Moore's541Law continues to hold, most of the increases in transistor density will be542used for explicit, software-visible parallelism, rather than increasing543clock rate. The major open question is how these machines will be544programmed.545In this talk I will give an overview of some of the hardware trends, and546describe programming techniques using Partitioned Global Address Space547(PGAS)548languages. PGAS languages have emerged as a viable alternative to message549passing programming models for large-scale parallel machines and clusters.550They also offer an alternative to shared memory programming models (such as551threads and OpenMP) and the possibility of a single programming model that552will work well across a wide range of shared and distributed memory553platforms.554PGAS languages provide a shared memory abstraction with support for locality555through the user of distributed data types. The three most mature PGAS556languages (UPC, CAF and Titanium) offer a statically partitioned global557address space with a static SPMD control model, while languages emerging558from the DARPA HPCS program are more dynamic. I will describe these559languages as well as our experience using them in both numeric and560symbolic applications.""")561562]563564565def find(name):566name = name.lower()567ans = None568for v in speakers:569if name in v.name.lower():570if not ans is None:571raise RuntimeError, "ambiguous search for '%s'"%name572ans = v573if ans is None:574raise RuntimeError, "Speaker '%s' not found"%name575return ans576577578579##############580581def html_talks(file='a'):582a = open('%s.html'%file,'w')583a.write(r"""584<html>585<head>586<title>587Interactive Parallel Computation in Support of Research588in Algebra, Geometry and Number Theory: Abstracts589</title>590591<style>592div.box {593border:1px solid #004400;594padding:10px;595margin-left:30px;596margin-right:30px;597}598table {599border-bottom:1px solid lightgray;600border-top:1px solid lightgray;601}602603a:active { color: #ff0000; }604a:hover { background-color: #aaffaa}605a { text-decoration: none; }606607div.space {608padding:50px;609margin-top:15px;610background-color:#eeeeee;611}612613614h2.top {615text-align:center;616}617618div.bar {619padding:1px;620background-color:#999999;621border-top: 1px solid black;622border-bottom: 1px solid black;623margin:2px;624}625626</style>627628<body>629<h1 align=center>Titles and Abstracts</h1>630631These are the abstracts for all the talks scheduled for <a href="index.html">MSRI Parallel Computation632the workshop</a>, listed in633alphabetical order. For times, see the <a href="schedule.html">the schedule</a> itself.634<br>635<hr>636""")637for s in speakers:638a.write(s.html_full())639a.write('<br><br><hr>')640a.write('</body></html>')641a.close()642643def wiki_talks():644a = open('%s.txt'%file,'w')645a.write(r"""646= Titles and Abstracts =647These are the abstracts for all the talks scheduled for [:msri07: the workshop], listed in648alphabetical order. For times, see the [:msri07/schedule: schedule] itself.649650[[TableOfContents]]651652""")653654for s in speakers:655a.write(s.wiki_full())656a.write('\n\n')657a.close()658659def pdf_abstracts(file='a', verbose=False):660a = open('%s.tex'%file,'w')661a.write(r"""662\documentclass{article}663\usepackage{url}664\usepackage{fullpage}665\title{Titles and Abstracts:\vspace{4ex}\mbox{}\\666\Large Interactive Parallel Computation in Support of Research in\\Algebra, Geometry667and Number Theory\vspace{4ex}\mbox{}\\668\large A Workshop at MSRI Jan 29-Feb 2 organized by\\Burhanuddin, Demmel, Goins, Kaltofen, Perez, Stein, Verrill, and Weening}669\begin{document}670\maketitle671\par\noindent672""")673for s in speakers:674a.write(s.latex_full())675a.write('\\mbox{}\\vspace{6ex}\n\n\n\\par\\noindent')676a.write('\\end{document}')677a.close()678if not verbose:679z = '1>/dev/null'680else:681z = ''682os.system('pdflatex %s.tex < /dev/null %s'%(file, z))683684########################################################################################685686class Day:687def __init__(self, name, theme='', discussion='', invited=[], contributed=[]):688self.name = name689self.theme = theme690self.discussion = discussion691self.invited = invited692self.contributed = contributed693694def __repr__(self):695return self.name696697def html_contrib(self, n):698c = self.contributed699if n < len(c):700if 'Cohn' in c[n].name:701return c[n].html_short() + ' (part %s)'%n702else:703return '(Optional) ' + c[n].html_short()704else:705return 'Break'706707def latex_contrib(self, n):708c = self.contributed709if n < len(c):710if 'Cohn' in c[n].name:711return c[n].latex_short() + ' (part %s)'%n712else:713return '(Optional) '+ c[n].latex_short()714else:715return 'Break'716717def html(self):718s = """719<a name="%s">720<table class="ws" width=90%% align=center>721<tr><td class='time' width=10%%></td><td class="day" width=80%%><font size=+3><b>%s</b><br><font size=+2><b>Theme: %s</b></font></td></tr>722<tr><td class='time'>9:00-10:00</td><td>%s</td></tr>723<tr><td class='time'>10:00-10:30</td><td class="break">Tea Break</td></tr>724<tr><td class='time'>10:30-11:30</td><td>%s</td></tr>725<tr><td class='time'>11:30-12:30</td><td>%s</td></tr>726<tr><td class='time'>12:30-1:30</td><td class="break">Lunch</td></tr>727<tr><td class='time'>1:30-2:00</td><td>%s</td></tr>728<tr><td class='time'>2:00-2:30</td><td>%s</td></tr>729<tr><td class='time'>2:30-3:30</td><td class="discuss"><b>Discussion:</b> %s</td></tr>730"""%(731self.name.split()[Integer(0)].strip(',').lower(),732self.name, self.theme,733self.invited[Integer(0)].html_short(),734self.invited[Integer(1)].html_short(),735self.invited[Integer(2)].html_short(),736self.html_contrib(Integer(0)),737self.html_contrib(Integer(1)),738self.discussion)739if self.name != 'Friday, Feb 2':740s += """741<tr><td class='time'>3:30-4:00</td><td class="break">Tea Break</td></tr>742<tr><td class='time'>4:00-5:30</td><td>Working Sessions</td></tr>743<tr><td class='time'>6:00-8:00</td><td class="break">Dinner</td></tr>744<tr><td class='time'>8:00-10:00</td><td class="break">Coffee Shops...</td></tr>745"""746s += """747</table>748"""749return s750751def latex(self):752s = """753{\\Large \\bf %s}\\vspace{1ex}754755\\begin{tabular}{|l|l|}\\hline756& \\begin{minipage}{0.7\\textwidth}Theme: %s\\end{minipage} \\\\ \\hline7579:00--10:00 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\75810:00--10:30 & \\begin{minipage}{0.7\\textwidth}Tea Break\\end{minipage} \\\\75910:30--11:30 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\76011:30--12:30 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\ \\hline76112:30--1:30 & \\begin{minipage}{0.7\\textwidth}Lunch\\end{minipage} \\\\7621:30--2:00 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\7632:00--2:30 & \\begin{minipage}{0.7\\textwidth}%s\\end{minipage} \\\\ \\hline7642:30--3:30 & \\begin{minipage}{0.7\\textwidth}Discussion: %s\\end{minipage} \\\\765"""%(766self.name, self.theme,767self.invited[Integer(0)].latex_short(),768self.invited[Integer(1)].latex_short(),769self.invited[Integer(2)].latex_short(),770self.latex_contrib(Integer(0)),771self.latex_contrib(Integer(1)),772self.discussion773)774if self.name != 'Friday, Feb 2':775s += """7763:30--4:00 & \\begin{minipage}{0.7\\textwidth}Tea Break\\end{minipage} \\\\7774:00--5:30 & \\begin{minipage}{0.7\\textwidth}Working Sessions\\end{minipage} \\\\7786:00--8:00 & \\begin{minipage}{0.7\\textwidth}Dinner\\end{minipage} \\\\7798:00--10:00 & \\begin{minipage}{0.7\\textwidth}Coffee Shops...\\end{minipage} \\\\ \\hline780"""781s += '\\end{tabular}'782return s783784days = [\785Day('Monday, Jan 29',786'What do we want and what can we expect from applying parallel techniques to pure mathematics research tools?',787"""Parallel methods for mathematics software for doing algebra, geometry788and number theory -- What can we expect? What are the right problems to attack first789and get the most for our work?790""",791[find('pernet'),792find('granger'),793find('roch')],794[find('cohn'), find('cohn')]),795796Day('Tuesday, Jan 30',797'Algebra',798'Parallel methods for algebra (commutative algebra, linear algebra, group theory).',799[find('yelick'), find('hida'), find('noel')],800[find('Leykin'), find('tonchev')]801),802803Day('Wednesday, Jan 31',804'Number Theory',805"Parallel methods for number theory.",806[find('martin'), find('hart'), find('qiang')],807[find('bradshaw'), find('kotsireas')]808),809810Day('Thursday, Feb 1',811'Geometry',812'Parallel methods for geometry',813[find('verschelde'), find('moreno'), find('bailey')],814[find('wolf'), find('neun')]),815816Day('Friday, Feb 2',817'Large-Scale Parallel Computation',818'Wrap-up session',819[find('harrison'), find('cooperman'), find('edelman')],820[find('khan')])821]822823824def html_sched(file='a'):825a = open('%s.html'%file,'w')826a.write(r"""827<html>828<head>829<title>830Interactive Parallel Computation in Support of Research831in Algebra, Geometry and Number Theory: Schedule832</title>833834<style>835div.box {836border:1px solid #004400;837padding:10px;838margin-left:30px;839margin-right:30px;840}841842table.ws {843border-width: 1px 1px 1px 1px;844border-spacing: 0px;845border-style: solid solid solid solid ;846border-color: gray gray gray gray;847border-collapse: separate;848background-color: #333355;849}850table.ws th {851border-width: 1px 1px 1px 1px;852padding: 1px 1px 1px 1px;853border-style: solid solid solid solid ;854border-color: gray gray gray gray;855background-color: white;856-moz-border-radius: 0px 0px 0px 0px;857padding:10px;858}859table.ws td {860border-width: 1px 1px 1px 1px;861padding: 1px 1px 1px 1px;862border-style: solid solid solid solid ;863border-color: gray gray gray gray;864background-color: white;865-moz-border-radius: 0px 0px 0px 0px;866padding:10px;867}868table.ws td.break {869border-width: 1px 1px 1px 1px;870padding: 1px 1px 1px 1px;871border-style: solid solid solid solid ;872border-color: gray gray gray gray;873background-color: #80ff80;874-moz-border-radius: 0px 0px 0px 0px;875padding:10px;876}877878table.ws td.day {879border-width: 1px 1px 1px 1px;880padding: 1px 1px 1px 1px;881border-style: solid solid solid solid ;882border-color: gray gray gray gray;883background-color: #e0e0ff;884-moz-border-radius: 0px 0px 0px 0px;885padding:10px;886}887888table.ws td.discuss {889border-width: 1px 1px 1px 1px;890padding: 1px 1px 1px 1px;891border-style: solid solid solid solid ;892border-color: gray gray gray gray;893background-color: #e0ffe0;894-moz-border-radius: 0px 0px 0px 0px;895padding:10px;896}897898table.ws td.time {899border-width: 1px 1px 1px 1px;900padding: 1px 1px 1px 1px;901border-style: solid solid solid solid ;902border-color: gray gray gray gray;903background-color: #ffffe0;904-moz-border-radius: 0px 0px 0px 0px;905padding:10px;906}907908a:active { color: #ff0000; }909a:hover { background-color: #aaffaa}910a { text-decoration: none; }911912div.space {913padding:50px;914margin-top:15px;915background-color:#eeeeee;916}917918919h2.top {920text-align:center;921}922923div.bar {924padding:1px;925background-color:#999999;926border-top: 1px solid black;927border-bottom: 1px solid black;928margin:2px;929}930931</style>932933<body>934<h1 align=center>Schedule and <a href="#abstracts">Abstracts</a></h1>935<h3>936This is the schedule of talks and list of abstracts for this <a href="index.html">MSRI Parallel Computation937workshop</a>. There is also <a href="schedule.pdf">a PDF schedule</a> and938<a href="abstracts.pdf">a PDF list of abstracts</a>.939</h3>940<h3 align=center>941<a href="#monday">Monday</a> | <a href="#tuesday">Tuesday</a> |942<a href="#wednesday">Wednesday</a> |943<a href="#thursday">Thursday</a> |944<a href="#friday">Friday</a>945</h3>946<div class='bar'></div>947""")948949for d in days:950a.write(d.html())951a.write("<br><div class='bar'></div><br>")952953a.write('<a name="abstracts"><h1>Abstracts</h1>')954for s in speakers:955a.write(s.html_full())956a.write('<br><br><hr>')957958a.write('</body></html>')959960a.close()961962963964def pdf_sched(file='a', verbose=False):965a = open('%s.tex'%file,'w')966a.write(r"""967\documentclass{article}968\usepackage{url}969\usepackage{fullpage}970\title{Schedule: Jan 29 -- Feb 2, 2007\vspace{3ex}\mbox{}\\971\Large MSRI: Interactive Parallel Computation in Support of Research in\\Algebra, Geometry972and Number Theory}973\date{}974\begin{document}975\maketitle976\vspace{-3ex}977978\mbox{}\par\noindent979\begin{center}980""".strip())981for d in days:982a.write(d.latex())983a.write('\\mbox{}\\vspace{4ex}\n\n\n\\par\\noindent')984a.write('\\end{center}')985a.write('\\end{document}')986a.close()987if not verbose:988z = '1>/dev/null'989else:990z = ''991os.system('pdflatex %s.tex < /dev/null %s'%(file, z))992993994def gen():995dir='/home/was/conferences/2007-msri-parallel/'996pdf_sched(file='schedule', verbose=True)997pdf_abstracts(file='abstracts', verbose=True)998html_sched(file='schedule')999os.system('cp -v *.pdf *.html %s/'%dir)1000