for "garbage" and "collection" and "1988"
Search term: garbage;collection;1988
No spelling errors allowed, case-insensitive, partial words match.
@TechReport{AB:VGC, author = "Andrew W. Appel and Aage Bendiksen", title = "Vectorized Garbage Collection", institution = "Princeton University", number = "CS-TR-169-88", year = "1988", month = jul, }
@TechReport{Ba:CGCAR, author = "${}^{\clubsuit}$Joel F. Bartlett", title = "Compacting Garbage Collection With Ambiguous Roots", type = "Technical Report", number = "88/2", institution = DECWRL, address = "Palo Alto, California", month = feb, keywords = "copying garbage collection, compact, storage, heap, lisp, scheme, mahler, pointers", annote = "Excellent trick here---make newness a page property, not an address-range property", year = "1988", } @Article{DBTDCStocs88, author = "Douglas B. Terry and Daniel C. Swinehart", title = "Managing Stored Voice in the Etherphone System", journal = "ACM Transactions on Computer Systems", volume = "6", number = "1", pages = "3--27", month = feb, year = "1988", abstract = "The voice mailer in the Etherphone system provides facilities for recording, editing, and playing stored voice in a distributed personal-computing environment. It provides the basis for applications such as voice mail, annotation of multimedia documents, and voice editing using standard text-editing techniques. To facilitate sharing, the voice manager stores voice on a special voice file server that is accessible via the local internet. Operations for editing a passage of recorded voice simply build persistent data structures to represent the edited voice. These data structures, implementing an abstraction called voice ropes, are stored in a server database and consist of lists of intervals within voice files. Clients refer to voice ropes solely by reference. Interests, additional persistent data structures maintained by the server, serve two purposes: First, they provide a sort of directory service for managing the voice ropes that have been created. More importantly, they provide a reliable reference-counting mechanism, permitting the garbage collection of voice ropes that are no longer needed. These interests are grouped into classes; for some important classes, obsolete interests can be detected and deleted by a class-specific algorithm that runs periodically.", keywords = "OS, networking, protocol design, multimedia, file server, ropes, intensions, capabilities, reference counts, garbage collection, digitized voice, network services, storage reclamation, voice-annotated documents, voice editing, voice file server", } @Article{Appel:AWL, author = "Andrew W. Appel", title = "Allocation without Locking", journal = "Software Practice and Experience", volume = "19", number = "7", month = jul, year = "1989", pages = "703--705", note = "Also appeared as technical report, Princeton University {CS-TR-182-88}, sep. 1988", abstract = "In a programming environment with both concurrency and automatic garbage collection, the allocation and initialization of a new record is a sensitive matter. Parallel implementations usually use a locking or semaphore mechanism to ensure that allocation is an atomic operation. This locking significantly adds to the cost of an allocation. This paper shows how allocation can run extremely quickly even in a multi-thread environment: open-coded, without locking. 
The key idea is that the allocation instruction sequence is highly stylized, so the garbage collector can check whether a thread is suspended in the middle of an allocation and complete the allocation on the thread's behalf.", keywords = "garbage collection, dynamic memory allocation, malloc, concurrency, synchronization", } @Article{BW:GCIUE, author = "Hans-Juergen Boehm and Mark Weiser", title = "Garbage Collection in an Uncooperative Environment", journal = "Software Practice and Experience", volume = "18", number = "9", month = sep, year = "1988", pages = "807--820", abstract = "We describe a technique for storage allocation and garbage collection in the absence of significant co-operation from the code using the allocator. This limits garbage collection overhead to the time actually required for garbage collection. In particular, application programs that rarely or never make use of the collector no longer encounter a substantial performance penalty. This approach greatly simplifies the implementation of languages supporting garbage collection. It further allows conventional compilers to be used with a garbage collector, either as the primary means of storage reclamation, or as a debugging tool. Our approach has two potential disadvantages. First, some garbage may fail to be reclaimed. Secondly, we use a 'stop and collect' approach, thus making the strategy unsuitable for applications with severe real-time constraints. We argue that the first problem is, to some extent, inherent in any garbage collection system. Furthermore, based on our experience, it is usually not significant in practice. In spite of the second problem, we have had favourable experiences with interactive applications, including some that use a heap of several megabytes.", keywords = "garbage collection, storage management, debugging", } @MastersThesis{He:mthesis, title = "Compile time Garbage Collection", author = "Lucy Hederman", school = "Rice University", address = "Houston, Texas", year = "1988", } @Book{FiHa88, author = "A. J. Field and P. G. Harrison", title = "Functional Programming", publisher = "Addison-Wesley", year = "1988", keywords = "many programming examples in {HOPE}, comparison with {MIRANDA}, {FP}, {LISP}, basics of lambda-calculus, type checking, type inference, environment-based implementation (eager, lazy), graph-based implementation (G-machine, ...), introduction to program transformation, overview garbage collection, appendix: basic domain theory", comments = "Signatur: {P FIE 13164}", } @TechReport{JD88470, author = "Douglas Johnson", title = "Trap Architectures for Lisp Systems", number = "{UCB//CSD-88-470}", year = "1988", month = nov, pages = "22", institution = "University of California, Berkeley", annote = "Recent measurements of Lisp systems show a dramatic skewing of operation frequency. For example, small integer (fixnum) arithmetic dominates most programs, but other number types can occur on almost any operation. Likewise, few memory references trigger special handling for garbage collection, but nearly all memory operations could trigger such special handling. Systems like SPARC and SPUR have shown that small amounts of special hardware can significantly reduce the need for inline software checks by trapping when an unusual condition is detected. A system's trap architecture thus becomes key to performance. In most systems, the trap architecture is intended to handle errors (e.g., address faults).
The requirements for Lisp traps are quite different. In particular, the trap frequency is higher, processing time per trap is shorter, and traps must be handled in the user's address space and context. This paper looks at these requirements, evaluates current trap architectures, and proposes enhancements for meeting those requirements. These enhancements increase Lisp performance by 9%--32% at a cost of about 1.4% more CPU logic.", } @TechReport{Touati88, author = "Herve Touati", title = "A Prolog Garbage Collector for Aquarius", institution = "University of California, Berkeley", number = "UCB//CSD-88-443", month = aug, year = "1988", pages = "54", bibdate = "July 28, 1993", abstract = "Our design is the result of an attempt to incorporate into Prolog implementations the ideas which made generation scavenging successful for Lisp and Smalltalk. The main challenge was to take into account the Prolog technique of memory recovery upon backtracking based on stack deallocation. We were able to do so with little extra overhead on top of the global stack. This strategy has several advantages: it improves the locality of the executing program by keeping the data structures compacted and by allocating new objects in a fixed part of the address space; it improves the locality and the predictability of the garbage collection, which can concentrate its efforts on the fixed size area where new objects are allocated; and it allows us to use simpler, time-efficient garbage collection algorithms. The performance of the algorithm is further enhanced by the use of copying algorithms whenever made possible by the deterministic nature of the executing program.", } @PhdThesis{Wiseman88, author = "Simon Robert Wiseman", title = "Garbage collection in distributed systems", year = "1988", school = "University of Newcastle upon Tyne", } @InProceedings{AEL:RTCGCSM, author = "${}^{\clubsuit}$Andrew W. Appel and John R. Ellis and Kai Li", title = "Real-time Concurrent Garbage Collection on Stock Multiprocessors", crossref = "PLDI88", pages = "11--20", comment = "Available as DEC SRC Tech report number 25, Feb 14, 1988. Also {SIGPLAN} Notices 23:(7), July 1988", note = "Refinement of Baker's algorithm", } @PhdThesis{EJphd88, title = "Object Mobility in a Distributed Object-Oriented System", author = "Eric Jul", school = "University of Washington", address = "Computer Science Department", month = dec, year = "1988", abstract = "This dissertation studies the movement of objects both large and small within a locally distributed system. We propose that object mobility at a fine granularity can be a powerful tool for simplifying the construction of distributed applications. The unit of mobility in previous systems has typically been an address space. We propose using data of any size as the unit of mobility. Our study of fine-grained mobility is based on a new programming language called Emerald for which we have implemented a run-time kernel. Emerald supports a single model of computation: the object. All objects are described using a uniform semantic model. They can move freely within the system to take advantage of distribution and respond to dynamically changing requirements. We say that Emerald has fine-grained mobility because {\em all} objects can move regardless of size. Process migration is supported; light-weight processes that are executing within a migrating object are also migrated on-the-fly. Our design of Emerald emphasizes efficiency, in particular, efficient communication between potentially distributed objects located on the same machine.
By integrating distribution concepts into the programming language, the compiler is able to generate more efficient code for objects that do not require full generality. Emerald supports call-by-reference semantics---even for remote procedure calls. We introduce a special parameter passing mode, {\em call-by-move}, which can be used to move parameters for efficiency reasons. We present a design for a distributed on-the-fly garbage collector. The design includes a novel faulting mechanism that allows user processes to proceed during the collection. This dissertation discusses aspects of language design related to mobility and the design and implementation of an efficient Emerald kernel. Measurements of the system and of several small applications are presented.", keywords = "mobility, migration, garbage collecting, remote procedure call, Emerald, distributed objects", } @MastersThesis{Heng88, author = "Seng-Lai Heng", title = "Performance evaluation of numerous garbage collection algorithms by real-time simulation", year = "1988", school = "University of Texas at Austin", keywords = "garbage collection, simulation methods, real-time", }
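
The Appel:AWL entry above turns on an open-coded, lock-free allocation fast path. The C fragment below is a minimal sketch of such a bump-pointer sequence, not the paper's actual code; the names thread_heap, alloc_ptr, alloc_limit and gc_slow_path are illustrative assumptions. Because the fast path is only a compare, a bump and a header store, a collector that stops a thread can recognize from its saved state that it is in the middle of an allocation and finish that allocation on the thread's behalf, which is the paper's key observation.

/* Minimal sketch of an open-coded, lock-free allocation fast path in the
 * spirit of Appel:AWL.  All names here are illustrative assumptions. */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct {
    char *alloc_ptr;    /* next free byte in this thread's allocation region */
    char *alloc_limit;  /* end of the region */
} thread_heap;

/* Placeholder: a real runtime would trigger a collection here. */
static void *gc_slow_path(thread_heap *h, size_t nbytes)
{
    (void)h; (void)nbytes;
    abort();
}

/* The whole fast path: compare, bump, store the header.  No lock is taken,
 * because each thread allocates from its own region. */
static inline void *gc_alloc(thread_heap *h, size_t nbytes, uintptr_t header)
{
    char *p = h->alloc_ptr;
    if ((size_t)(h->alloc_limit - p) < nbytes)
        return gc_slow_path(h, nbytes);  /* region exhausted: collect */
    h->alloc_ptr = p + nbytes;           /* bump the per-thread pointer */
    *(uintptr_t *)p = header;            /* initialize the new record   */
    return p;
}

int main(void)
{
    static char region[4096];
    thread_heap h = { region, region + sizeof region };
    void *obj = gc_alloc(&h, 16, 0xC0FFEE);   /* allocate a 16-byte record */
    return obj != NULL ? 0 : 1;
}
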
@InBook{ALBANO88, key = "Albano et al", author = "A. Albano and G. Ghelli and R. Orsini", title = "The Implementation of Galileo's Persistent Values", booktitle = "Data Types and Persistence", publisher = "Springer-Verlag", year = "1988", chapter = "16", pages = "253--263", abstract = "Galileo is a conceptual language for database applications in which the persistence of values is an orthogonal property, i.e., values of any type are persistent as long as they are accessible from the top level environment. In Galileo providing such property poses difficult problems since the language is based on a heap memory management, with variable size elements and an incremental garbage collection, and it allows user control of failures and undo of updates. The interaction of these features is described and the approach adopted in the implementation now underway is discussed.", bibdate = "Mon Nov 6 16:02:02 1989", owner = "robyn", }
@InCollection{MulTan85, author = "S. J. Mullender and A. S. Tanenbaum", editor = "S. J. Mullender", title = "A Distributed File Service Based on Optimistic Concurrency Control", booktitle = "The Amoeba distributed operating system: Selected papers 1984-1987", pages = "185--207", publisher = "Centrum voor Wiskunde en Informatica, Amsterdam", month = dec, year = "1985", keywords = "File System Amoeba", abstract = "Principles are presented for a distributed file and database system that leaves a large degree of freedom to the users of the system. It can be used as an efficient storage medium for files, but also as a basis for a distributed data base system. An optimistic concurrency control mechanism, based on the simultaneous existence of several versions of a file or data base, is used. Each version provides to the client that owns it a consistent view of the contents of the file at the time of the version's creation. We show how this mechanism works, how it can be implemented and how serialisability of concurrent access is enforced. A garbage collector that runs independent of, and in parallel with, the operation of the system is also presented.", note = "Comment 1 by schlenk, Thu Jun 23 22:51:38 1988 The Amoeba filesystem is based on a tree of pages. Each page is named by a path leading to it, that includes previous data or filename pages. Transactions are supported by versions which makes this filesystem an ideal basis for databases.", } @Article{Wilson88, author = "P. R. Wilson", title = "Opportunistic Garbage Collection", journal = "ACM SIGPLAN Notices", volume = "23", number = "12", pages = "98--102", month = dec, year = "1988", abstract = "Opportunistic garbage collection is a non-incremental generation-based garbage collection system. It attempts to minimize the probability of disruptive pauses by carefully scheduling them at low points in the stack height, where live data tend to be at a minimum. These heuristics can be surprisingly simple and cheap to implement - user input primitives provide an effective hook from which to invoke the scheduling routine, since they tend to correspond both to local stack minima and to computational pause boundaries. An additional mechanism is proposed to detect times when it is safe to scavenge an intermediate generation, based on the amount of data surviving from a new-generation scavenge. This mechanism can be used reliably in certain cases, or heuristically in a larger class of cases.", } @Article{Bevan88, author = "D. I. Bevan", title = "An Efficient Reference Counting Solution to the Distributed Garbage Collection Problem", journal = "Parallel Computing", volume = "9", pages = "179--192", publisher = "Elsevier Science Publishers B.V.", year = "1988", abstract = "A good programming language permits the programmer to concentrate on his application rather than on low-level implementation details. In particular, he does not have to concern himself with storage allocation because memory management is dealt with efficiently by the implementation of the language. To reclaim disused storage for reuse the implementation incorporates a garbage collection algorithm. When the language is implemented on a distributed multiprocessor architecture, this algorithm ideally collects garbage as soon as it is created and has minimal overheads in terms of space requirements and interprocess communications. We describe here an elegant algorithm with these properties which makes use of reference counting.", } @Article{AtkNac88, author = "M. C. Atkins and L. R.
Nackman", title = "The Active Deallocation of Objects in Object-Oriented Systems", journal = "Software Practice and Experience", volume = "18", number = "11", pages = "1073--1089", month = "[11]", year = "1988", abstract = "In object-oriented systems, it is often useful for objects to be allowed to carry out some action before they are deallocated. This can be done by defining a destroy method in the object's class, and arranging for the memory system to send a message invoking this method immediately before deallocating the object. This allows resources associated with the object to be returned to the system, limited cross-language garbage collection, and other, more complex, behaviour. During the execution of the destroy method it is possible for new references to objects to be created. Care must be taken that the garbage collection does not erroneously free such objects. Algorithms are presented to implement destroy methods in systems using reference counting and mark-scan garbage collection techniques. Properties that are desirable in such systems are also discussed.", } @Article{Nilsen88, author = "K. Nilsen", title = "Garbage Collection of Strings and Linked Data Structures in Real Time", journal = "Software, Practice and Experience", volume = "18", number = "7", pages = "613--640", publisher = "John Wiley & Sons , New York, NY , USA", month = "[7]", year = "1988", } @Article{Pixley88, author = "C. Pixley", title = "An Incremental Garbage Collection Algorithm for Multi-Mutator Systems", journal = "Distributed Computing", volume = "3", number = "1", pages = "41--50", publisher = "Springer-Verlag , Berlin, Heidelberg, New York, Tokyo , D", year = "1988", }
@InCollection{Halstead88, author = "R. H. Halstead", editor = "T. Ito and R. H. Halstead", title = "New Ideas in Parallel Lisp: Language Design, Implementation, and Programming Tools", booktitle = "Parallel Lisp: Languages and Systems, Sendai, Japan", pages = "2--57", publisher = "Springer-Verlag", address = "Berlin, DE", year = "1988", keywords = "functional", abstract = "A Lisp-based approach is attractive for parallel computing since Lisp languages and systems assume significant clerical burdens, such as storage management. Parallel Lisps thus enable programmers to focus on the new problems introduced by using concurrency. Parallel Lisps now exist that can execute realistic applications with ``industrial-strength'' performance, but there are applications whose requirements they do not handle elegantly. Recent work has contributed new, elegant ideas in the areas of speculative computation, continuations, exception handling, aggregate data structures, and scheduling. Using these ideas, it should be possible to build ``second generation'' parallel Lisp systems that are as powerful and elegantly structured as sequential Lisp systems. This paper surveys these recent ideas and explores how they could fit together in the parallel Lisp systems of the future, examining issues at three levels: language design, implementation techniques, and programming tools. The discussion is based on the Multilisp programming language, which is Scheme (a Lisp dialect) extended with the future construct. The paper outlines three criteria for judging Scheme extensions for parallel computing: compatibility with sequential Scheme, invariance of the result when future is introduced into side-effect-free Scheme programs, and modularity. Proposed language mechanisms, such as support of first-class continuations, are evaluated against these criteria. In the area of implementation techniques, results of experiments with lazy task creation, unfair scheduling, and parallel garbage collection are surveyed; some areas that need more investigation, such as scheduler implementation for speculative computing, and interaction between user-level and operating-system schedulers, are identified. Finally, past work in tools to help with the development of Multilisp programs is surveyed, and needs for additional tools are discussed.", note = "Lecture Notes in Computer Science 441.", }
@Article{Appleby:1988:GCP, author = "K. Appleby and M. Carlsson and S. Haridi and D. Sahlin", title = "Garbage collection for {Prolog} based on {WAM}", journal = "Communications of the ACM", volume = "31", number = "6", pages = "719--741", month = jun, year = "1988", ISSN = "0001-0782", acknowledgement = "Nelson H. F. Beebe, Center for Scientific Computing, Department of Mathematics, University of Utah, Salt Lake City, UT 84112, USA, Tel: +1 801 581 5254, FAX: +1 801 581 4148, e-mail: \path|beebe@math.utah.edu|", bibdate = "Sun Aug 14 18:32:13 MDT 1994", keywords = "algorithms; design; languages; performance; theory", subject = "F.1.1 Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Models of Computation \\ F.2.2 Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Sorting and searching \\ D.3.2 Software, PROGRAMMING LANGUAGES, Language Classifications, Prolog", }
@InProceedings{Caplinger88, author = "Michael Caplinger", title = "A Memory Allocator with Garbage Collection for {C}", booktitle = "USENIX Conference Proceedings", location = "Bell Communications Research", pages = "325--330", publisher = "USENIX", address = "Dallas, TX", month = "Winter", year = "1988", }
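
BW:GCIUE and Caplinger88 above both rely on a collector that gets no cooperation from the compiler and therefore scans roots conservatively: any word that might be a pointer into the heap is treated as one. The fragment below is a rough sketch of that root scan under the simplifying assumptions of a contiguous heap and a downward-growing stack; it is not the Boehm--Weiser implementation.

/* Rough sketch of a conservative root scan in the spirit of BW:GCIUE and
 * Caplinger88.  heap_lo/heap_hi, the empty mark_object() and the contiguous
 * heap are simplifications for illustration. */
#include <stdint.h>

static char *heap_lo, *heap_hi;            /* bounds of the collected heap */

static void mark_object(char *candidate)   /* placeholder for the marker   */
{
    (void)candidate;
}

/* Scan a memory region word by word; anything that falls inside the heap is
 * conservatively marked.  Misidentified integers can only cause some garbage
 * to be retained, never a live object to be reclaimed. */
static void scan_region_conservatively(void *lo, void *hi)
{
    for (uintptr_t *p = lo; (char *)p < (char *)hi; p++) {
        char *candidate = (char *)*p;
        if (candidate >= heap_lo && candidate < heap_hi)
            mark_object(candidate);
    }
}

/* Typical use at collection time: scan from (approximately) the current
 * stack pointer up to the stack base recorded at program startup.
 * Assumes a stack that grows downward. */
void scan_my_stack(char *stack_base)
{
    int dummy;
    scan_region_conservatively(&dummy, stack_base);
}
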
@TechReport{Soba88, author = "Patrick G. Sobalvarro", title = "{Dynamic Garbage Collection and Memory Management in Lucid Common Lisp}", institution = "Lucid Inc.", address = "Menlo Park, California", year = "1988", owner = "pcl", descr = "pllsp", } @TechReport{GKN*88, author = "A. Goto and Y. Kimura and T. Nakagawa and T. Chikayama", title = "{Lazy Reference Counting Method --- An Incremental Garbage Collection Method for Parallel Inference Machines}", institution = "Institute for New Generation Computer Technology", address = "ICOT Research Center, Tokyo, Japan", month = feb, year = "1988", type = "Technical Report", number = "TR-338", owner = "pcl", descr = "pllog,pagc", } @TechReport{Naka88, author = "K. Nakajima", title = "{Piling GC --- Efficient Garbage Collection for AI Languages}", institution = "Institute for New Generation Computer Technology", address = "ICOT Research Center, Tokyo, Japan", month = mar, year = "1988", type = "Technical Report", number = "TR-354", owner = "pcl", descr = "pllog,pagc", } @TechReport{NKM88, author = "K. Nishida and Y. Kimura and A. Matsumoto", title = "{Evaluation of the Effect of Incremental Garbage Collection by MRB on FGHC Parallel Execution Performance}", institution = "Institute for New Generation Computer Technology", address = "ICOT Research Center, Tokyo, Japan", month = jun, year = "1988", type = "Technical Report", number = "TR-394", owner = "pcl", descr = "pllog,psben,pagc", }
@InProceedings{Hals89, author = "Robert H. Halstead", title = "{New Ideas in Parallel Lisp: Language Design, Implementation, and Programming Tools}", booktitle = "Parallel Lisp: Languages and Systems", address = "Sendai, Japan, June 5--8", year = "1988", editor = "T. Ito and R. H. Halstead", volume = "441", series = "Lecture Notes in Computer Science", publisher = "Springer, Berlin", pages = "2--57", abstract = "A Lisp-based approach is attractive for parallel computing since Lisp languages and systems assume significant clerical burdens, such as storage management. Parallel Lisps thus enable programmers to focus on the new problems introduced by using concurrency. Parallel Lisps now exist that can execute realistic applications with ``industrial-strength'' performance, but there are applications whose requirements they do not handle elegantly. Recent work has contributed new, elegant ideas in the areas of speculative computation, continuations, exception handling, aggregate data structures, and scheduling. Using these ideas, it should be possible to build ``second generation'' parallel Lisp systems that are as powerful and elegantly structured as sequential Lisp systems. This paper surveys these recent ideas and explores how they could fit together in the parallel Lisp systems of the future, examining issues at three levels: language design, implementation techniques, and programming tools. The discussion is based on the Multilisp programming language, which is Scheme (a Lisp dialect) extended with the {\tt future} construct. The paper outlines three criteria for judging Scheme extensions for parallel computing: compatibility with sequential Scheme, invariance of the result when {\tt future} is introduced into side-effect-free Scheme programs, and modularity. Proposed language mechanisms, such as support of first-class continuations, are evaluated against these criteria. In the area of implementation techniques, results of experiments with lazy task creation, unfair scheduling, and parallel garbage collection are surveyed; some areas that need more investigation, such as scheduler implementation for speculative computing, and interaction between user-level and operating-system schedulers, are identified. Finally, past work in tools to help with the development of Multilisp programs is surveyed, and needs for additional tools are discussed.", keywords = "Lisp, Futures", }
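
Both Halstead entries (Halstead88 and Hals89) revolve around Multilisp's future construct: an expression is evaluated concurrently, and a consumer blocks only when it actually touches the value. As a loose approximation of that programming model only (lazy task creation, scheduling and parallel garbage collection, which the paper is really about, are ignored), a future can be modelled in C with a mutex, a condition variable and a result slot:

/* Loose pthreads approximation of Multilisp's `future`; all names here are
 * assumptions for the sketch, not Multilisp's implementation. */
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  ready;
    int             resolved;           /* has the value been computed yet? */
    void           *value;
    void          *(*compute)(void *);
    void           *arg;
    pthread_t       worker;
} future_t;

static void *future_worker(void *f_)
{
    future_t *f = f_;
    void *v = f->compute(f->arg);       /* run the deferred computation */
    pthread_mutex_lock(&f->lock);
    f->value = v;
    f->resolved = 1;
    pthread_cond_broadcast(&f->ready);
    pthread_mutex_unlock(&f->lock);
    return NULL;
}

/* Roughly (future (compute arg)): start the computation concurrently. */
void future_spawn(future_t *f, void *(*compute)(void *), void *arg)
{
    pthread_mutex_init(&f->lock, NULL);
    pthread_cond_init(&f->ready, NULL);
    f->resolved = 0;
    f->compute = compute;
    f->arg = arg;
    pthread_create(&f->worker, NULL, future_worker, f);
}

/* Touching the future blocks only if the value is not ready yet.  This
 * sketch supports a single touch, which also joins the worker thread. */
void *future_touch(future_t *f)
{
    pthread_mutex_lock(&f->lock);
    while (!f->resolved)
        pthread_cond_wait(&f->ready, &f->lock);
    void *v = f->value;
    pthread_mutex_unlock(&f->lock);
    pthread_join(f->worker, NULL);
    return v;
}

static void *slow_square(void *arg)
{
    static int result;
    result = *(int *)arg * *(int *)arg;
    return &result;
}

int main(void)
{
    int n = 7;
    future_t f;
    future_spawn(&f, slow_square, &n);          /* runs concurrently  */
    printf("%d\n", *(int *)future_touch(&f));   /* blocks until ready */
    return 0;
}
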
@TechReport{Birrell88, author = "Andrew D. Birrell and Michael B. Jones and Edward P. Wobber", title = "A Simple and Efficient Implementation for Small Databases.", institution = "Digital Equipment Corporation, Systems Research Centre", number = "24", pages = "13 pages.", month = "30 " # jan, year = "1988", abstract = "This paper describes a technique for implementing the sort of small databases that frequently occur in the design of operating systems and distributed systems. We take advantage of the existence of very large virtual memories, and quite large real memories, to make the technique feasible. We maintain the database as a strongly typed data structure in virtual memory, record updates incrementally on disk in a log, and occasionally make a checkpoint of the entire database. We recover from crashes by restoring the database from an old checkpoint, then replaying the log. We use existing packages to convert between strongly typed data objects and their disk representations, and to communicate strongly typed data across the network (using remote procedure calls). Our memory is managed entirely by a general purpose allocator and garbage collector. This scheme has been used to implement a name server for a distributed system. The resulting implementation has the desirable property of being simultaneously simple, efficient, and reliable. for {"}small databases.{"} A small database is a data structure that is small enough to fit in virtual memory and requires the fast read access times that memory provides, yet is also modified fairly frequently and must be recorded on a storage medium that survives crashes. The technique suggested in this report stores the database's current state in memory, then applies well-known database methods in a simple way to make the storage nonvolatile. The technique seems obvious once you've learned about it, but it is by no means widely known or used. Small databases are frequently found in today's operating systems, but they are usually implemented using ad hoc methods that are not as efficient and reliable as the technique described here. Although the authors' abstract mentions garbage collection and generalized packages for transferring typed data across the network or to disk, the technique remains useful even in environments that lack such niceties. For example, while I was at Stanford I used it in a simple name server for the V system, written in C with hand-coded procedures for reading and writing checkpoints and log entries. Once I learned about the technique (from a brief mention in an earlier paper), I found it easy to implement and was quickly convinced it was the best approach for my application. If I had had time, I would have gone back and converted our authentication server to use it as well. Tim Mann", } @TechReport{Ellis88, author = "John R. Ellis and Kai Li and Andrew W. Appel", title = "Real-time Concurrent Collection on Stock Multiprocessors.", institution = "Digital Equipment Corporation, Systems Research Centre", number = "25", pages = "24 pages.", month = "14 " # feb, year = "1988", abstract = "We've designed and implemented a copying garbage-collection algorithm that is efficient, real-time, concurrent, runs on commercial uniprocessors and shared-memory multiprocessors, and requires no change to compilers. The algorithm uses standard virtual-memory hardware to detect references to {"}from space{"} objects and to synchronize the collector and mutator threads. 
We've implemented and measured a prototype running on SRC's 5-processor Firefly. It will be straightforward to merge our techniques with generational collection. An incremental, non-concurrent version could be implemented easily on many versions of Unix. program in order to trace all of the reachable objects and sweep up the garbage, a procedure that can take several seconds. Because of this obtrusiveness, many programs (interactive applications and systems programs, for instance) cannot use such a garbage collector. Recent garbage collector designs interrupt the program more often but for shorter periods, giving interactive response. But each of these designs suffers from at least one of these drawbacks: it doesn't collect circular structures, it requires microcode support, it doesn't support multithreaded applications or exploit shared-memory multiprocessors, or it requires an extra level of indirection in every pointer. This report describes a garbage collector design with none of these drawbacks, and gives the results of testing a prototype implementation of this garbage collector on a shared-memory multiprocessor. Mark R. Brown", } @TechReport{Cardelli88, author = "Luca Cardelli and James Donahue and Lucille Glassman and Mick Jordan and Bill Kalsow and Greg Nelson", title = "Modula-3 Report.", institution = "Digital Equipment Corporation, Systems Research Centre", number = "31", pages = "55 pages.", month = "25 " # aug, year = "1988", abstract = "This report is the definition of the programming language Modula-3, which was designed by the DEC Systems Research Center and the Olivetti Research Center, with the guidance and inspiration of Niklaus Wirth. resembles Object Pascal, Oberon, and Euclid. A notable feature of this language family is the use of modules to delineate the separation between the implementation and the use of interfaces; modules allow separate compilation without sacrificing the strong typechecking of languages like Pascal. Modula-3 supports object-oriented programming, garbage collection, exception handling, lightweight processes, and the isolation of unsafe features. It incorporates a new type system that is simpler, more uniform, and more powerful than those of its ancestors. It is unusual to find so many good things in such a small language, and unusual for such a brief language manual to be so complete. The designers were determined that the language would be definable in a readable fifty-page manual. They succeeded admirably; both the language and its exposition are exemplary. This report will certainly interest anyone concerned with modular programming languages, type systems, or programming language definition. James J. Horning", } @TechReport{Cardelli89b, author = "Luca Cardelli and James Donahue and Lucille Glassman and Mick Jordan and Bill Kalsow and Greg Nelson", title = "Modula-3 Report (revised)", institution = "Digital Equipment Corporation, Systems Research Centre", number = "52", pages = "71 pages.", month = "1 " # nov, year = "1989", abstract = "The goal of Modula-3 is to be as simple and safe as it can be while meeting the needs of modern systems programmers. Instead of exploring new features, we studied the features from the Modula family of languages that have proven themselves in practice and tried to simplify them and fit them into a harmonious language. We found that most of the successful features were aimed at one of two main goals: greater robustness, and a simpler, more systematic type system. 
Modula-3 descends from Mesa, Modula-2, Cedar, and Modula-2+. It also resembles its cousins Object Pascal, Oberon, and Euclid. Modula-3 retains one of Modula-2's most successful features, the provision for explicit interfaces between modules. It adds objects and classes, exception handling, garbage collection, lightweight processes (or threads), and the isolation of unsafe features. The Modula-3 report was published by Olivetti and Digital in August 1988. Implementation efforts followed shortly at both companies. In January 1989, the committee revised the language to reflect the experiences of these implementation teams. The main changes were the introduction of branded reference types, the requirement that opaque types be branded, the legalization of opaque supertypes, and the new flexibility in revealing information about an opaque type.", }
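
The Ellis88 report above (SRC report 25, also listed earlier as AEL:RTCGCSM) detects mutator references to not-yet-scanned to-space pages with ordinary virtual-memory protection. The sketch below shows the core of that trick on a POSIX system; scan_page is a placeholder, the unscanned region is assumed to be page-aligned, and a faithful implementation would also synchronize concurrent collector and mutator threads and be careful about what is legal inside a signal handler.

/* Sketch of a virtual-memory read barrier in the spirit of Ellis88:
 * unscanned to-space pages are protected, and the fault handler scans and
 * unprotects a page when the mutator first touches it. */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static long page_size;

/* Placeholder: forward every from-space pointer stored in this page. */
static void scan_page(void *page)
{
    (void)page;
}

static void fault_handler(int sig, siginfo_t *info, void *ctx)
{
    (void)sig; (void)ctx;
    void *page = (void *)((uintptr_t)info->si_addr & ~(uintptr_t)(page_size - 1));
    scan_page(page);                                    /* make the page consistent   */
    mprotect(page, page_size, PROT_READ | PROT_WRITE);  /* then let the mutator go on */
}

/* Protect the not-yet-scanned part of to-space; any mutator access now traps
 * into fault_handler, which scans and unprotects one page at a time.
 * `unscanned` must be page-aligned. */
void install_read_barrier(void *unscanned, size_t len)
{
    struct sigaction sa;
    page_size = sysconf(_SC_PAGESIZE);
    memset(&sa, 0, sizeof sa);
    sa.sa_sigaction = fault_handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, NULL);
    mprotect(unscanned, len, PROT_NONE);
}
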
@TechReport{AITR-1417, title = "A Lifetime-based Garbage Collector for {LISP} Systems on General-Purpose Computers", author = "Patrick Sobalvarro", institution = "Artificial Intelligence Laboratory, Massachusetts Institute of Technology (MIT)", address = "Cambridge, Massachusetts", month = feb, year = "1988", pages = "68", url = "ftp://publications.ai.mit.edu/ai-publications/1000-1499/AITR-1417.ps.Z", abstract = "Garbage collector performance in LISP systems on custom hardware has been substantially improved by the adoption of lifetime-based garbage collection techniques. To date, however, successful lifetime-based garbage collectors have required special-purpose hardware, or at least privileged access to data structures maintained by the virtual memory system. I present here a lifetime-based garbage collector requiring no special-purpose hardware or virtual memory system support, and discuss its performance.", }
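
The AITR-1417 entry closes the list with a lifetime-based (generational) collector that needs no special hardware or virtual-memory support, which means the compiled code itself must record old-to-young pointers with a small software write barrier. The sketch below shows one common form of such a barrier, card marking; the 256-byte card size, the table layout and the names are illustrative choices rather than the specific scheme of the thesis.

/* Generic card-marking write barrier of the kind a lifetime-based collector
 * on stock hardware emits at every pointer store.  Sizes and names are
 * illustrative assumptions. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CARD_SHIFT 8                        /* 256-byte cards (illustrative) */
#define HEAP_SIZE  (1u << 20)
#define NUM_CARDS  (HEAP_SIZE >> CARD_SHIFT)

static char    heap[HEAP_SIZE];             /* the old generation            */
static uint8_t card_table[NUM_CARDS];       /* one dirty byte per card       */

/* Every pointer store into the heap also dirties the card that holds the
 * updated slot.  A minor collection then scans only the dirty cards of the
 * old generation for pointers into the nursery, instead of the whole heap. */
static inline void write_barrier(void **slot, void *new_value)
{
    *slot = new_value;
    size_t card = (size_t)((char *)slot - heap) >> CARD_SHIFT;
    card_table[card] = 1;
}

/* Called after a minor collection has processed the dirty cards. */
static inline void clear_cards(void)
{
    memset(card_table, 0, sizeof card_table);
}
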
Found 33 references in 11 bibliographies.