for "garbage" and "collection" and "1984"
Search term: garbage;collection;1984
No spelling errors allowed, case-insensitive, partial words match.
@InCollection{Borgwardt84, author = "P. Borgwardt", title = "Parallel Prolog Using Stack Segments on Shared-memory Multiprocessors", booktitle = "1984 International Symposium on Logic Programming", publisher = "IEEE ISBN: 0 8186 0522 7", address = "New York, USA", year = "1984", keywords = "Program Compilers; High Level Languages; Parallel Processing; Programming; Software Engineering", abstract = "A method of parallel evaluation for standard PROLOG for shared-memory multiprocessors is presented that is a natural extension of the current methods of compiling PROLOG for sequential execution. In particular, the method exploits stack-based evaluation with stack segments spread across several processors to reduce the amount of runtime storage needed and hence to reduce the occurrence of garbage collection in the distributed computation. AND parallelism and stream parallelism are the most important sources of concurrent execution in this method; these are implemented using local process lists; idle processors may scan these and execute any process which is ready to execute. OR parallelism is less important, but the method does not implement it with hash table windows into a binary tree of OR contexts when it is requested in the source program.", note = "CH2007-3/84/0002\$01.00", }
@InCollection{Warren84a, author = "D. S. Warren", title = "Efficient {PROLOG} memory management for flexible control strategies", booktitle = "1984 International Symposium on Logic Programming", pages = "198--202", publisher = "IEEE, ISBN: 0 8186 0522 7", address = "New York", year = "1984", keywords = "Storage management and garbage collection", abstract = "A memory management technique for representing and manipulating terms in a PROLOG system is described that allows for control strategies other than just depth-first. The algorithm itself is not very complex, being a generalization of the standard algorithm. It is interesting because it is essentially as efficient as the standard algorithms for those parts of the search that are depth-first. This makes it particularly applicable to control strategies in which the search is locally depth-first, yet globally not. Techniques for handling deterministic nodes, separating the memory allocation of global and local variables, and handling tail recursion can all be generalized to apply with this algorithm.", note = "U. S. Copyright Clearance Center Code: CH2007-3/84/0000-0198\$01.00", }
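The Borgwardt84 abstract above says AND parallelism and stream parallelism are implemented with local process lists that idle processors scan for ready work. The following minimal Python sketch illustrates only that ready-list idea and is not Borgwardt's implementation; the names ready_lists, take_work and the placeholder solve are invented, and all goals are seeded up front so workers simply stop when every list is empty.

import threading, collections, queue

NUM_WORKERS = 4
ready_lists = [collections.deque() for _ in range(NUM_WORKERS)]   # one ready list per worker
list_locks  = [threading.Lock() for _ in range(NUM_WORKERS)]
results = queue.Queue()

def solve(goal):
    return "solved(" + goal + ")"      # stand-in for executing one Prolog goal

def take_work(my_id):
    # Prefer the local list; otherwise scan the other workers' lists,
    # which is the "idle processors may scan these" step of the abstract.
    for i in [my_id] + [j for j in range(NUM_WORKERS) if j != my_id]:
        with list_locks[i]:
            if ready_lists[i]:
                return ready_lists[i].popleft()
    return None

def worker(my_id):
    while True:
        goal = take_work(my_id)
        if goal is None:
            break                      # no ready process anywhere
        results.put(solve(goal))

# Conjunctive goals of one clause body placed on a single worker's list,
# as AND parallelism would; any idle worker may pick them up.
for g in ["p(X)", "q(X)", "r(X)"]:
    ready_lists[0].append(g)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(NUM_WORKERS)]
for t in threads: t.start()
for t in threads: t.join()
print([results.get() for _ in range(results.qsize())])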
@InProceedings{Brooks84d, author = "Rodney A. Brooks", title = "Trading Data Space for Reduced Time and Code Space in Real-Time Garbage Collection on Stock Hardware", booktitle = "LISP and Functional Programming. Conference Record of the 1984 ACM Symposium, Austin, Texas, August 6-8, 1984", organization = ACM, address = "New York", editor = "Prgrm. Chrm. {G. L. Steele, Jr.}", number = "ISBN 0-89791-142-3", pages = "256--262", keywords = "LISP", year = "1984", }
@InProceedings{Moon84, author = "David A. Moon", title = "Garbage Collection in a Large {LISP} System", booktitle = "LISP and Functional Programming. Conference Record of the 1984 ACM Symposium, Austin, Texas, August 6-8, 1984", organization = ACM, address = "New York", editor = "Prgrm. Chrm. {G. L. Steele, Jr.}", number = "ISBN 0-89791-142-3", pages = "235--246", keywords = "LISP", year = "1984", }
@Book{Naka84a, author = "Katsuhiko Nakamura", title = "Associative concurrent evaluation of logic programs", publisher = "School of Science and Engineering Tokyo Denki University", address = "Tokyo", month = aug, year = "1984", keywords = "prolog, garbage-collection, architecture", }
@PhdThesis{Mo:phd, author = "K. A. Mohammed-Ali", title = "Object-Oriented Storage Management and Garbage Collection in Distributed Processing Systems", school = "Royal Institute of Technology, Dept. of Computer Systems", address = "Stockholm, Sweden", year = "1984", abstract = "We propose different distributed, object-oriented storage management and garbage collection systems and solve several implementation problems associated with such systems. Both global and local/global schemes are discussed.", }
@Book{IB-Z84325, editor = "J. A. Campbell", title = "Implementations of Prolog", edition = "1", publisher = "Ellis Horwood", address = "Chichester", year = "1984", ISBN = "0-85312-675-5", descriptor = "Implementation, Prolog", annote = "The book contains numerous articles on specific aspects of the language Prolog. After a few survey articles, implementation aspects such as the search strategy, backtracking, unification, and garbage collection are covered. Extensions of Prolog are then considered, above all intelligent backtracking strategies, extended unification algorithms, and Prolog on non-standard machines such as distributed and multiprocessor systems.", }
@InCollection{MulTan85, author = "S. J. Mullender and A. S. Tanenbaum", editor = "S. J. Mullender", title = "A Distributed File Service Based on Optimistic Concurrency Control", booktitle = "The Amoeba distributed operating system: Selected papers 1984-1987", pages = "185--207", publisher = "Centrum voor Wiskunde en Informatica, Amsterdam", month = "[12]", year = "1985", keywords = "File System Amoeba", abstract = "Principles are presented for a distributed file and database system that leaves a large degree of freedom to the users of the system. It can be used as an efficient storage medium for files, but also as a basis for a distributed data base system. An optimistic concurrency control mechanism, based on the simultaneous existence of several versions of a file or data base, is used. Each version provides to the client that owns it a consistent view of the contents of the file at the time of the version's creation. We show how this mechanism works, how it can be implemented and how serialisability of concurrent access is enforced. A garbage collector that runs independently of, and in parallel with, the operation of the system is also presented.", note = "Comment 1 by schlenk, Thu Jun 23 22:51:38 1988: The Amoeba filesystem is based on a tree of pages. Each page is named by a path leading to it that includes previous data or filename pages. Transactions are supported by versions, which makes this filesystem an ideal basis for databases.", }
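The MulTan85 abstract describes optimistic concurrency control built on several simultaneous versions of a file, each giving its owning client a consistent view, with serialisability enforced and an independent, concurrent garbage collector for old versions. One common way to realise such a scheme is first-committer-wins validation, shown below as a toy single-process sketch; VersionStore and its methods are invented names for illustration, not the Amoeba file service interface.

class VersionStore:
    def __init__(self, initial):
        self.versions = {0: initial}    # version number -> file contents
        self.current = 0                # latest committed version

    def open(self):
        # A client gets a snapshot: its base version number and a private copy.
        return self.current, dict(self.versions[self.current])

    def commit(self, base, contents):
        # Optimistic check: fail if someone committed since the snapshot was taken.
        if base != self.current:
            return False
        self.current += 1
        self.versions[self.current] = contents
        return True

    def collect(self, live_bases):
        # Reclaim versions that no open snapshot still references.
        keep = set(live_bases) | {self.current}
        for v in list(self.versions):
            if v not in keep:
                del self.versions[v]

store = VersionStore({"balance": 100})
base_a, copy_a = store.open()
base_b, copy_b = store.open()
copy_a["balance"] += 10
copy_b["balance"] -= 5
print(store.commit(base_a, copy_a))     # True: first committer wins
print(store.commit(base_b, copy_b))     # False: must re-open and retry
store.collect(live_bases=[])            # version 0 is no longer referenced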
@Proceedings{ACM84, title = "{ACM} Symposium on Lisp and Functional Programming", publisher = "ACM", year = "1984", keywords = "FP, slfp, lfp", abstract = "ISBN 0-89791-142-3 1 R. A. Brooks, R. P. Gabriel. A critique of common lisp 9 R. H. Halstead jr. Implementation of multilisp: lisp on a multiprocessor 18 C. T. Haynes, D. P. Friedman. Engines build process abstractions 25 R. P. Gabriel, J. McCarthy. Queue based multi processing lisp 45 P. Wadler. Listlessness is better than laziness: lazy evaluation and garbage collection at compile time 53 A. Goldberg, R. Paige. Stream processing 63 F. Bellegarde. Rewriting systems on FP expressions that reduce the number of sequences they yield 74 J. S. Givler, R. B. Kieburtz. Schema recognition for program transformation. 85 P. Conte, X. Rodel. Formes: an object and time oriented system for music composition and synthesis 96 R. B. Dannenberg. Arctic: a functional language for real time control 104 M. Sheeran. muFP a language for VLSI design 113 J. Chailloux, M. Devin, JM. Hullot. Le lisp a portable and efficient lisp system 123 G. Stefan, et al. Dialisp - a lisp machine 129 R. L. Bates, D. Dyer, M. Feber. Recent developments in ISI interlisp 140 H. G. Okuno et al. Tao: a fast interpreter centred lisp system on lisp machine elis 150 S. Wholey, S. E. Fahlman. The design of an instruction set for common lisp 159 W. R. Stoye, T. J. W. Clarke, A. C. Norman. Some practical methods for rapid combinator reduction 167 P. Hudak, B. Goldberg. Experiments in diffused combinator reduction 177 R. E. Griswold. Expression evaluation in the icon programming language 184 R. Milner. A proposal for standard ML 198 D. MacQueen. Modules for standard ML 208 L. Cardelli. Compiling a functional language. 218 L. Augustsson. A compiler for lazy ML 228 E. Saint-James. Recursion is more efficient than iteration 235 D. A. Moon. Garbage collection in a large lisp system. 247 H. Lieberman. Steps towards better debugging tools for lisp 256 R. A. Brooks. Trading data space for reduced time and code space in real time garbage collection on stock hardware 263 T. Katayama. Type inference and type checking for functional programming languages 273 A. Pettorossi. A powerful strategy for deriving efficient programs by transformations 282 W. Dosch, B. Moller. Busy and lazy FP with infinite objects 293 C. T. Haynes, D. P. Friedman, M. Wand. Continuations and coroutines. 299 H. Stoyan. Early LISP history (56-59) 311 G. Smolka. Making control and data flow in large programs explicit 323 Y. Malachi, R. Waldinger. Tablog: the deductive tableau programming language 331 J. des Rivieres, B. C. Smith. The implementation of procedurally reflective languages 348 D. P. Friedman, M. Wand. Reification: reflection without metaphysics 356 W. Clinger. The scheme 311 compiler: an exercise in denotational semantics", }
@InProceedings{Ungar84, author = "D. Ungar", title = "Generation scavenging: a non-disruptive high performance storage reclamation algorithm", booktitle = "ACM Sigsoft/Sigplan Software Engineering Symposium on Practical Software Development Environments", pages = "157--167", month = may, year = "1984", keywords = "FP, functional, applicative, programming, garbage collection", abstract = "Claims to use $<$ 2\% CPU time on one Smalltalk implementation. A variation on copying compacting collectors. Objects that survive several collections are assumed to remain useful and become `old'; they are not scavenged again.", }
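The Ungar84 abstract gives the core rule of generation scavenging: an object that survives several scavenges is assumed to stay useful, becomes `old', and is not scavenged again. The toy model below shows only that promotion rule; the Obj record, the explicit root set and the TENURE_THRESHOLD value are invented for illustration (they are not Ungar's parameters), and reachability is reduced to direct membership in the root set to keep the sketch short.

TENURE_THRESHOLD = 3                    # scavenges survived before tenuring

class Obj:
    def __init__(self, name):
        self.name = name
        self.survivals = 0

new_space, old_space = [], []

def allocate(name):
    obj = Obj(name)
    new_space.append(obj)
    return obj

def scavenge(roots):
    # Keep reachable new-space objects on a survivor list; objects that have
    # survived enough scavenges move to old space and are never scanned again.
    # Unreachable objects are simply dropped.
    global new_space
    survivors = []
    for obj in new_space:
        if obj not in roots:
            continue
        obj.survivals += 1
        (old_space if obj.survivals >= TENURE_THRESHOLD else survivors).append(obj)
    new_space = survivors

roots = {allocate("a"), allocate("b")}
allocate("garbage")                     # never referenced, reclaimed at once
for _ in range(4):
    scavenge(roots)
print([o.name for o in new_space], [o.name for o in old_space])   # [] ['a', 'b']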
@Article{Ungar:1, author = "D. Ungar and R. Blau and P. Foley and D. Samples and D. Patterson", title = "Architecture of {SOAR}: Smalltalk on a Risc", year = "1984", journal = "11th Annual International Symposium on Computer Architecture", institution = "SIGARCH", pages = "188--197", keywords = "object oriented architectures; reduced instruction set architectures; tagged object oriented architectures; garbage collection", }
@InProceedings{Halstead84, author = "R. H. Halstead", title = "Implementation of Multilisp: Lisp on a Multiprocessor", booktitle = "Conference Record of the 1984 ACM Symposium on LISP and Functional Programming, Austin, TX", pages = "9--17", publisher = "ACM", address = "New York, NY", year = "1984", keywords = "parallel lisp", abstract = "Multilisp is an extension of Lisp (more specifically, of the Lisp dialect Scheme) with additional operators and additional semantics to deal with parallel execution. It is being implemented on the 32-processor Concert multiprocessor. The current implementation is complete enough to run the Multilisp compiler itself, and has been run on Concert prototypes including up to four processors. Novel techniques are used for task scheduling and garbage collection. The task scheduler helps control excessive resource utilization by means of an unfair scheduling policy; the garbage collector uses a multiprocessor algorithm modeled after the incremental garbage collector of Baker. A companion paper discusses language design issues relating to Multilisp.", }
@InProceedings{Wadler84, author = "P. L. Wadler", title = "Listlessness is Better Than Laziness: Lazy Evaluation and Garbage Collection at Compile Time", booktitle = "Conference Record of the 1984 ACM Symposium on LISP and Functional Programming, Austin, TX", pages = "45--52", publisher = "ACM", address = "New York, NY", year = "1984", keywords = "functional laziness pipes transformation architecture bounded evaluation", }
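The Halstead84 abstract above credits an `unfair' scheduling policy with controlling excessive resource use: the most recently created task runs first, so a tree of tasks is explored roughly depth-first and the set of pending tasks stays small. The sketch below is only a sequential simulation of that LIFO policy; spawn and run_all are invented names, and the real Multilisp scheduler runs tasks in parallel via futures.

pending = []                           # stack of zero-argument thunks

def spawn(thunk):
    pending.append(thunk)              # newest task on top

def run_all():
    while pending:
        pending.pop()()                # unfair: always run the newest task

def fib(n, report):
    # The combining task is pushed first, so under LIFO it runs only after
    # both child tasks (and their whole subtrees) have filled in acc.
    if n < 2:
        report(n)
    else:
        acc = []
        spawn(lambda: report(sum(acc)))
        spawn(lambda: fib(n - 2, acc.append))
        spawn(lambda: fib(n - 1, acc.append))

results = []
spawn(lambda: fib(6, results.append))
run_all()
print(results)                         # [8]; pending grows with the depth, not the tree size

A FIFO queue in run_all would instead expand the whole call tree before finishing any branch, which is exactly the resource blow-up the unfair policy avoids.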
@InProceedings{Borgwardt:1984:PPU, author = "P. Borgwardt", title = "Parallel Prolog Using Stack Segments on Shared-memory Multiprocessors", crossref = "IEEE:1984:ISL", pages = "??", year = "1984", note = "CH2007-3/84/0002\$01.00.", abstract = "A method of parallel evaluation for standard PROLOG for shared-memory multiprocessors is presented that is a natural extension of the current methods of compiling PROLOG for sequential execution. In particular, the method exploits stack-based evaluation with stack segments spread across several processors to reduce the amount of runtime storage needed and hence to reduce the occurrence of garbage collection in the distributed computation. AND parallelism and stream parallelism are the most important sources of concurrent execution in this method; these are implemented using local process lists; idle processors may scan these and execute any process which is ready to execute. OR parallelism is less important, but the method does not implement it with hash table windows into a binary tree of OR contexts when it is requested in the source program.", bibdate = "Thu Jul 21 09:37:44 1994", keywords = "Program Compilers; High Level Languages; Parallel Processing; Programming; Software Engineering", }
@Article{Hickey:1984:PAF, author = "Tim Hickey and Jacques Cohen", title = "Performance Analysis of On-the-Fly Garbage Collection", journal = "Communications of the ACM", volume = "27", number = "11", pages = "1143--1154", month = nov, year = "1984", bibsource = "ftp://ftp.ira.uka.de/pub/bibliography/Compiler/garbage.collection.bib", }
@Article{COPELAND82, key = "Copeland", author = "G. Copeland", title = "What If Mass Storage Were Free?", journal = "IEEE Computer", volume = "15", number = "7", month = jul, year = "1982", pages = "27--35", abstract = "This article takes the idea of ever-decreasing mass storage costs to its absolute limit and examines the hypothetical effects that free mass storage would likely have on the design and use of future data-base systems. Unfortunately, because of the broad scope of the topics discussed herein, it is not possible to describe each of them in depth. Instead, the goal here is to provide an overall picture of mass storage systems so that the reader can see how the pieces fit together in a compatible and consistent way.", annote = "The following advantages accrue from a non-deletion strategy: improved functionality, access to past states, use in accounting systems, elimination of complex garbage collection mechanisms, improved reliability, throughput, and availability, and synchronization of distributed data bases. Two problems with optical disks are noted: the solid-state laser read/write heads currently have a limited lifespan and therefore require periodic replacement, and first-generation optical disks do not facilitate small insertions because optical disks, like magnetic disks, have unacceptable raw error rates.", bibdate = "Wed Sep 26 10:52:16 1984", }
@Article{Bena84, author = "Mordechai Ben-Ari", title = "{Algorithms for On-the-fly Garbage Collection}", journal = "{ACM Transactions on Programming Languages and Systems}", volume = "6", number = "3", month = jul, year = "1984", pages = "333--344", owner = "pcl", }
@TechReport{pdf0149, key = "Nakamura", author = "Katsuhiko Nakamura", title = "Associative concurrent evaluation of logic programs", address = "Tokyo", year = "1984", month = aug, institution = "School of Science and Engineering Tokyo Denki University", keywords = "garbage-collection,architecture", }
@Article{Christopher:1984:RCG, author = "T. W. Christopher", title = "Reference count garbage collection", journal = "Software---Practice and Experience", volume = "14", number = "6", pages = "503--507", month = jun, year = "1984", acknowledgement = "Nelson H. F. Beebe, Center for Scientific Computing, Department of Mathematics, University of Utah, Salt Lake City, UT 84112, USA, Tel: +1 801 581 5254, FAX: +1 801 581 4148, e-mail: \path|beebe@math.utah.edu|", bibdate = "Wed Aug 24 12:18:38 MDT 1994", keywords = "algorithms", subject = "E.1 Data, DATA STRUCTURES, Lists", }
@TechReport{MIT/LCS/TM-267, author = "R. Schooler and J. W. Stamos", title = "Proposal for a Small {Scheme} Implementation", institution = "MIT Laboratory for Computer Science", number = "MIT/LCS/TM-267", pages = "20", month = oct, year = "1984", price = "USD 5.00", keywords = "scheme, Macintosh, bytecodes, language implementation, garbage collection, tail recursion, closures", }
Found 20 references in 16 bibliographies.