for "garbage" and "collection" and "1989"
Search term: garbage;collection;1989
No spelling errors allowed, case-insensitive, partial words match.
@InCollection{B83, author = "S. B. Jones and D. {Le M{\'e}tayer}", title = "Compile-time garbage collection by sharing analysis", booktitle = "FPCA'89, London, England", publisher = "ACM Press", month = sep, year = "1989", }
@Article{Cr:GCASMP, author = "J. Crammond", title = "{A Garbage Collection Algorithm for Shared Memory Parallel Processors}", journal = "International Journal of Parallel Programming", volume = "17", number = "6", year = "1989", month = dec, pages = "497--522", }
@InProceedings{LL:HADSFTDGC, author = "Barbara Liskov and Rivka Ladin", title = "Highly-available distributed services and fault-tolerant distributed garbage collection", booktitle = "Fifth {ACM} Symposium on the Principles of Distributed Computing", pages = "29--39", year = "1989", }
@TechReport{Bartlett89, author = "Joel F. Bartlett", title = "Mostly-Copying Collection Picks Up Generations and {C}++", year = "1989", month = oct, number = "TN-12", institution = "DEC Western Research Laboratory", annote = "The ``mostly-copying'' garbage collection algorithm provides a way to perform compacting garbage collection in spite of the presence of ambiguous pointers in the root set. As originally defined, each collection required almost all accessible objects to be moved. While adequate for many applications, programs that retained a large amount of storage spent a significant amount of time garbage collecting. To improve performance of these applications, a generational version of the algorithm has been designed. This note reports on this extension of the algorithm and its application in collectors for Scheme and C++.", }
@Article{Appel:AWL, author = "Andrew W. Appel", title = "Allocation without Locking", journal = "Software Practice and Experience", volume = "19", number = "7", month = jul, year = "1989", pages = "703--705", note = "Also appeared as technical report, Princeton University {CS-TR-182-88}, sep. 1988", abstract = "In a programming environment with both concurrency and automatic garbage collection, the allocation and initialization of a new record is a sensitive matter. Parallel implementations usually use a locking or semaphore mechanism to ensure that allocation is an atomic operation. This locking significantly adds to the cost of an allocation. This paper shows how allocation can run extremely quickly even in a multi-thread environment: open-coded, without locking. The key idea is that the allocation instruction sequence is highly stylized, so the collector can check whether a thread is suspended in the middle of an allocation and complete it on the thread's behalf.", keywords = "garbage collection, dynamic memory allocation, malloc, concurrency, synchronization", }
@PhdThesis{Zorn:phd, author = "Benjamin Zorn", title = "Comparative Performance Evaluation of Garbage Collection Algorithms", school = "University of California at Berkeley, {EECS} Department", note = "Technical Report UCB/CSD 89/544", month = dec, year = "1989", }
%L Bjor89a
%K olit chloe-ftp ood89
%A Anders Bj\:ornerstedt
%T Secondary Storage Garbage Collection for Decentralized Object-Based Systems
%R Object Oriented Development
%E D. Tsichritzis
%I Centre Universitaire d'Informatique, University of Geneva
%D July 1989
%P 277-319
%% MIR
%X This paper describes a mechanism for secondary storage garbage collection that may be used to reclaim inaccessible resources in decentralized persistent object based systems. Schemes for object addressing and object identification are discussed and a proposal is made which handles volatile objects separately from persistent objects. The garbage collection of the space of volatile objects is decoupled from the garbage collection of the space of persistent objects. The first kind of garbage collection can avoid the complexity and overhead of a distributed algorithm by classifying "exported" objects as persistent. The problem of detecting and collecting "distributed garbage" is then deferred to garbage collection of persistent objects.
%% ftp: cui.unige.ch:OO-articles/garbageCollection.ps.Z
@TechReport{ACAMRP96389, title = "Objects sharing in distributed systems principles of garbage collection", author = "A. Couvert and A. Maddi and R. Pedrono", institution = "Inst. Nat. Recherche Inf.
Autom.", address = "Le Chesnay, France", number = "963", year = "1989", month = jan, } @Article{Be:ERCSDGCP, author = "David I. Bevan", title = "An Efficient Reference Counting Solution to the Distributed Garbage Collection Problem", journal = "Parallel Computing", volume = "9", number = "2", year = "1989", pages = "179--192", } @TechReport{Ozawa89, author = "Toshihiro Ozawa and Akira Hosoi and Akira Hattori", title = "Generation type garbage collection for parallel logic languages", year = "1989", month = oct, institution = "Institute for New Generation Computer Technology (ICOT)", number = "TR-512", keywords = "Logic programming, garbage collection, generation", abstract = "This paper presents a garbage collection (GC) method for parallel logic programming languages. Parallel logic languages require large amounts of data since logic variables can have only one value. Efficient memory management is important for an efficient language processor. In the parallel logic language Flat Guarded Horn Clauses (FGHC), the amount of live data is always small compared to the total amount of data allocated. These are two kinds of data: short-term and long-term. We concluded that garbage collection using only 2 generations best suits this kind of language. We call our garbage collection method ``2-generation garbage collection''. Short-term data is garbage collected back into the 1st generation garbage collection and long-term data is collected into the 2nd generation garbage collection. This method is efficient independent of the ratio of the amount of live data to heap size. When this ratio is high, our method is especially good, reducing the amount of data copied by a factor of 10, compared to simple copying garbage collection.", }
@InProceedings{Queinnec89, author = "Christian Queinnec and Barbara Beaudoing and Jean-Pierre Queille", title = "{M}ark {DURING} {S}weep rather than {M}ark {THEN} {S}weep", booktitle = parle89, year = "1989", organization = lncs # " " # "365", publisher = springer, month = jun, ecritsdicsla = "1", sourcefile = "themes/GCP/gcp", ftpfile = "gcp", w3keyword = "PARLE89", abstract = "Garbage Collection frees the programmer from the burden of explicitly deallocating unused data. This facility induces a considerable overhead but also causes some delays that may affect real-time applications. Guaranteed throughput (with at most short and predictable delays) is needed in many applications such as plane or plant control and requires at least a worst case analysis to identify the performances of the whole system. Traditional GC are made of two phases~: the marker which identifies all useful data, followed by the sweeper which reclaims all useless data. On-the-fly GC schemes were introduced to permit an application and a collector to run concurrently. That concurrency may lessen the GC penalty incurred by the application. We present here a new algorithm where the application, the marker and the sweeper are concurrent. The benefit is to tightly adjust collection rate to application consumption and have an allocation time bounded by a small constant. Moreover our algorithm does not waste memory and appears to be well suited for embedded systems. This ``mark {\sc during} sweep'' algorithm is completely presented. An interesting single-processor and incremental realisation is also analysed and followed by some implementation variations.", }
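The Queinnec, Beaudoing and Queille abstract above argues for running the application, the marker and the sweeper concurrently so that allocation has a small, bounded cost. The toy C fragment below illustrates only that scheduling idea: a fixed budget of mark and sweep steps per allocation. mark_one_step, sweep_one_step and take_free_cell are hypothetical placeholders, and the real algorithm's synchronisation and colouring invariants are not shown.

/* Placeholders: a real marker/sweeper would trace and reclaim cells.
 * Each returns non-zero while it still has work left in the current cycle. */
static int mark_one_step(void)    { return 0; }
static int sweep_one_step(void)   { return 0; }
static void *take_free_cell(void) { return 0; }   /* pops the free list */

enum { MARK_BUDGET = 4, SWEEP_BUDGET = 4 };

/* Each allocation advances marking and sweeping by at most a fixed budget,
 * so its worst-case cost is a small constant, and cells reclaimed by the
 * sweeper so far can be handed out immediately. */
static void *incremental_alloc(void)
{
    for (int i = 0; i < MARK_BUDGET && mark_one_step(); i++)
        ;
    for (int i = 0; i < SWEEP_BUDGET && sweep_one_step(); i++)
        ;
    return take_free_cell();
}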
@InBook{ALBANO88, key = "Albano et al", author = "A. Albano and G. Ghelli and R. Orsini", title = "The Implementation of Galileo's Persistent Values", booktitle = "Data Types and Persistence", publisher = "Springer-Verlag", year = "1988", chapter = "16", pages = "253--263", abstract = "Galileo is a conceptual language for database applications in which the persistence of values is an orthogonal property, i.e., values of any type are persistent as long as they are accessible from the top level environment. In Galileo providing such property poses difficult problems since the language is based on a heap memory management, with variable size elements and an incremental garbage collection, and it allows user control of failures and undo of updates. The interaction of these features is described and the approach adopted in the implementation now underway is discussed.", bibdate = "Mon Nov 6 16:02:02 1989", owner = "robyn", } @InProceedings{KITSUREGAWA89, key = "Kitsuregawa et al.", author = "M. Kitsuregawa and L. Harada and M. Takagi", title = "Join Strategies on {KD}-Tree Indexed Relations", booktitle = "Proceedings of the Fifth International Conference on Data Engineering", address = "Los Angeles, CA", month = feb, year = "1989", pages = "85--93", abstract = "In this paper we present efficient join algorithms on very large relations indexed by KD-trees. There are previous works proposing the join on multi-attribute clustered relations based on hashing and also on grid-partitioning, whose shortcomings are non-order preservation and low load-factor, respectively. KD-tree indexed relations are characterized by preserving data order and maintaining high load-factors. However, KD-tree indexing has the disadvantage of generating clusters which are overlapped in the join attribute domain, what causes a very high I/O cost for naive join algorithms. Here we analyze strategies to deal with this problem and introduce efficient algorithms to join two non-resident relations indexed by KD-trees. First we introduce the concept of wave, which is a set of pages that is the object of join processing and that propagates over the relation space in the direction of the join attribute axis. Based on this new concept, we present five join algorithms and also four extended algorithms with a garbage collection mechanism to increase the effective space of the main memory. We extensively evaluate these join algorithms with analytical formulas and simulation results. It is shown that the join of very large relations indexed by KD-trees can be performed with one scan of the relations.", bibdate = "Fri Jan 12 09:43:55 1990", owner = "robyn", }
@Article{KolLisWei89, author = "E. Kolodner and B. Liskov and W. Weihl", title = "Atomic Garbage Collection: Managing a Stable Heap", journal = "ACM SIGMOD RECORD", volume = "18", number = "2", year = "1989", month = jun, note = "Also published in/as: 19 ACM SIGMOD Conf. on the Management of Data, (Portland OR), May.-Jun.1989.", }
@Article{Franklin89, author = "M. Franklin and G. Copeland and G. Weikum", title = "What's Different About Garbage Collection for Persistent Programming Languages?", journal = "MCC TR", volume = "ACA-ST-062-89", address = "Austin, TX", year = "1989", keywords = "Bubba", } @InProceedings{Kolodner89, author = "E. Kolodner and B. Liskov and W. Weihl", title = "Atomic Garbage Collection: Managing a Stable Heap", booktitle = "Proc. ACM SIGMOD Conf.", pages = "15", address = "Portland, OR", month = may # "-" # jun, year = "1989", }
@Article{Wilson89, author = "P. R. Wilson", title = "A Simple Bucket-Brigade Advancement Mechanism for Generation-Based Garbage Collection", journal = "SIGPLAN Notices", volume = "24", number = "5", pages = "38--46", publisher = "ACM Press, New York, NY, USA", month = may, year = "1989", }
@Article{Appel89, author = "A. W. Appel", title = "Simple Generational Garbage Collection and Fast Allocation", journal = "Software, Practice and Experience", volume = "19", number = "2", pages = "171--184", publisher = "John Wiley \& Sons, New York, NY, USA", month = feb, year = "1989", }
@InProceedings{Schelv89, author = "M. Schelvis", editor = "Norman Meyrowitz", title = "Incremental Distribution of Timestamp Packets: {A} New Approach to Distributed Garbage Collection", booktitle = "Proceedings of the Conference on Object-Oriented Programming Systems, Languages, and Applications (OOPSLA)", pages = "37--48", publisher = "ACM Press, New York, NY, USA", address = "New Orleans, LA, USA", month = oct, year = "1989", note = "Published as SIGPLAN Notices, volume 24, number 10", }
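The Appel entry above ("Simple Generational Garbage Collection and Fast Allocation") combines cheap allocation in a small new space with occasional minor collections that copy survivors into the old space. The sketch below is a deliberately simplified illustration of such a minor collection for fixed-size two-field cells; the remembered set for old-to-new pointers, overflow checks and major collection are omitted, and all names are invented for this sketch.

#include <stddef.h>

#define NURSERY_CELLS 1024
#define OLD_CELLS     8192

typedef struct Cell {
    struct Cell *fields[2];   /* references to other cells, or NULL           */
    struct Cell *forward;     /* set once the cell has been copied (promoted) */
} Cell;

static Cell nursery[NURSERY_CELLS], old_space[OLD_CELLS];
static size_t nursery_top, old_top;

/* Fast allocation: a bounds check and an index bump in the nursery. */
static Cell *nursery_alloc(void)
{
    if (nursery_top == NURSERY_CELLS)
        return NULL;                      /* caller should run minor_collect */
    Cell *c = &nursery[nursery_top++];
    c->fields[0] = c->fields[1] = NULL;
    c->forward = NULL;
    return c;
}

static int in_nursery(const Cell *c)
{
    return c >= nursery && c < nursery + NURSERY_CELLS;
}

/* Copy a nursery cell into old space, once, and leave a forwarding pointer. */
static Cell *promote(Cell *c)
{
    if (c == NULL || !in_nursery(c))
        return c;                         /* absent or already old: unchanged */
    if (c->forward == NULL) {
        Cell *copy = &old_space[old_top++];   /* no overflow check in sketch  */
        *copy = *c;
        copy->forward = NULL;
        c->forward = copy;
    }
    return c->forward;
}

/* Minor collection: promote everything reachable from the roots, scanning
 * the newly promoted cells Cheney-style, then reuse the whole nursery. */
static void minor_collect(Cell **roots, size_t nroots)
{
    size_t scan = old_top;
    for (size_t i = 0; i < nroots; i++)
        roots[i] = promote(roots[i]);
    while (scan < old_top) {
        Cell *c = &old_space[scan++];
        c->fields[0] = promote(c->fields[0]);
        c->fields[1] = promote(c->fields[1]);
    }
    nursery_top = 0;
}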
@Article{Appel89, author = "A. W. Appel", title = "Runtime tags aren't necessary", journal = "Lisp and Symbolic Computation", volume = "2", number = "2", pages = "153--162", month = jun, year = "1989", keywords = "FP, functional programming, garbage collection, GC, polymorphic type, tag", }
@Article{Schelvis:1989:IDT, author = "M. Schelvis", title = "Incremental distribution of timestamp packets: a new approach to distributed garbage collection", journal = "ACM SIGPLAN Notices", volume = "24", number = "10", pages = "37--48", month = oct, year = "1989", ISSN = "0362-1340", acknowledgement = "Nelson H. F. Beebe, Center for Scientific Computing, Department of Mathematics, University of Utah, Salt Lake City, UT 84112, USA, Tel: +1 801 581 5254, FAX: +1 801 581 4148, e-mail: \path|beebe@math.utah.edu|", bibdate = "Sat Aug 13 17:16:20 MDT 1994", keywords = "algorithms; design; languages", subject = "D.3.4 Software, PROGRAMMING LANGUAGES, Processors \\ D.1.0 Software, PROGRAMMING TECHNIQUES, General \\ D.4.2 Software, OPERATING SYSTEMS, Storage Management, Allocation/deallocation strategies \\ D.4.7 Software, OPERATING SYSTEMS, Organization and Design, Distributed systems \\ G.2.2 Mathematics of Computing, DISCRETE MATHEMATICS, Graph Theory, Graph algorithms", }
@Article{Wilson:1989:CSC, author = "P. R. Wilson and T. G. Moher", title = "A ``card-marking'' scheme for controlling intergenerational references in generation-based garbage collection on stock hardware", journal = "ACM SIGPLAN Notices", volume = "24", number = "5", pages = "87--92", month = may, year = "1989", ISSN = "0362-1340", acknowledgement = "Nelson H. F. Beebe, Center for Scientific Computing, Department of Mathematics, University of Utah, Salt Lake City, UT 84112, USA, Tel: +1 801 581 5254, FAX: +1 801 581 4148, e-mail: \path|beebe@math.utah.edu|", bibdate = "Sat Aug 13 17:16:20 MDT 1994", keywords = "languages", subject = "D.4.2 Software, OPERATING SYSTEMS, Storage Management \\ D.2.m Software, SOFTWARE ENGINEERING, Miscellaneous \\ D.3.2 Software, PROGRAMMING LANGUAGES, Language Classifications \\ D.3.3 Software, PROGRAMMING LANGUAGES, Language Constructs", }
@Article{Wilson:1989:SBA, author = "P. R. Wilson", title = "A simple bucket-brigade advancement mechanism for generation-based garbage collection", journal = "ACM SIGPLAN Notices", volume = "24", number = "5", pages = "38--46", month = may, year = "1989", ISSN = "0362-1340", acknowledgement = "Nelson H. F. Beebe, Center for Scientific Computing, Department of Mathematics, University of Utah, Salt Lake City, UT 84112, USA, Tel: +1 801 581 5254, FAX: +1 801 581 4148, e-mail: \path|beebe@math.utah.edu|", bibdate = "Sat Aug 13 17:16:20 MDT 1994", keywords = "design", subject = "D.4.2 Software, OPERATING SYSTEMS, Storage Management", }
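The Wilson and Moher "card-marking" entry above describes a cheap write barrier for stock hardware: the heap is divided into small cards, every pointer store dirties one byte in a card table, and a generational collection then scans only the dirty cards for intergenerational references. A minimal sketch of that barrier, with invented sizes and names and a placeholder card scanner, might look like this:

#include <stdint.h>
#include <string.h>

#define HEAP_BYTES (1u << 20)
#define CARD_BYTES (1u << 9)                 /* 512-byte cards */
#define NUM_CARDS  (HEAP_BYTES / CARD_BYTES)

static unsigned char heap[HEAP_BYTES];       /* the old generation   */
static unsigned char card_table[NUM_CARDS];  /* 0 = clean, 1 = dirty */

/* Write barrier: store a pointer into a heap slot and dirty that slot's card.
 * The slot is assumed to lie inside `heap`. */
static void write_pointer(void **slot, void *value)
{
    *slot = value;
    size_t card = ((uintptr_t)slot - (uintptr_t)heap) / CARD_BYTES;
    card_table[card] = 1;
}

/* Placeholder: a real collector would look through the card's slots for
 * pointers into the young generation and record or process them. */
static void scan_card(size_t card) { (void)card; }

/* At a minor collection only the dirty cards are scanned, then cleaned. */
static void scan_dirty_cards(void)
{
    for (size_t card = 0; card < NUM_CARDS; card++)
        if (card_table[card])
            scan_card(card);
    memset(card_table, 0, sizeof card_table);
}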
@Article{STRAW89, key = "Straw et al.", author = "A. Straw and F. Mellender and S. Riegel", title = "Object Management in a Persistent Smalltalk System", journal = "spe", publisher = "John Wiley \& Sons, Ltd.", volume = "19", number = "8", month = aug, year = "1989", pages = "719--737", keywords = "Smalltalk-80; Persistent programming languages; Object-oriented databases; Garbage collection", abstract = "The main goal of the Alltalk project is to provide transparent database support to the Smalltalk programmer. As a first step towards this goal, the current version of Alltalk extends Smalltalk-80 by providing persistence to Smalltalk objects without adding a database sublanguage, new language syntax, classes or methods. This paper describes the implementation of object management in Alltalk, including database layout, database access methods, in-memory object management and high-level interface from the interpreter. It also discusses how the object manager is integrated in a unique way with the operations of the Alltalk garbage collector and interpreter.", bibdate = "Fri Oct 6 12:45:56 1989", owner = "robyn", } @InProceedings{WEISER89, key = "Weiser et al.", author = "M. Weiser and A. Demers and C. Hauser", title = "The Portable Common Runtime Approach to Interoperability", booktitle = "Proceedings of the Twelfth ACM SOSP", address = "Litchfield Park, AZ", volume = "23", month = dec, year = "1989", pages = "114--122", abstract = "Operating system abstractions do not always reach high enough for direct use by a language or applications designer. The gap is filled by language-specific runtime environments, which become more complex for richer languages (CommonLisp needs more than C++, which needs more than C). But language-specific environments inhibit integrated multi-lingual programming, and also make porting hard (for instance, because of operating system dependencies). To help solve these problems, we have built the Portable Common Runtime (PCR), a language-independent and operating-system-independent base for modern languages. PCR offers four interrelated facilities: storage management (including universal garbage collection), symbol binding (including static and dynamic linking and loading), threads (lightweight processes ), and low-level I/O (including network sockets). PCR is 'common' because these facilities simultaneously support programs in several languages. PCR supports C, Cedar, Scheme, and CommonLisp intercalling and runs pre-existing C and CommonLisp (Kyoto) binaries. PCR is 'portable' because it uses only a small set of operating system features. The PCR source code is available for use by other researchers and developers.", bibdate = "Thu Jan 11 11:00:07 1990", owner = "robyn", }
@TechReport{Glaser89, author = "H. Glaser and M. Reeve and S. Wright", title = "An Analysis of Reference Count Garbage Collection Schemes for Declarative Languages", institution = "Department of Computing, Imperial College", type = "Internal Report", address = "London, UK", year = "1989", keywords = "real time distributed systems functional", abstract = "This paper considers different methods of implementing reference count garbage collection and presents a new scheme, the Lazy/Weighted method. It gives a comparison of the methods based on efficiency, real-time operation and applicability for both centralised and distributed systems and then uses the data gathered from experiments with the ALICE parallel machine to quantify the comparison. The results show that the Lazy/Weighted method is the most promising.", } @InProceedings{Jones89, author = "S. B. Jones and D. {Le M{\'e}tayer}", title = "Compile-Time Garbage Collection by Sharing Analysis", booktitle = "Proceedings of the Conference on Functional Programming Languages and Computer Architecture '89, Imperial College, London", pages = "54--74", publisher = "ACM", address = "New York, NY", year = "1989", keywords = "le metayer", abstract = "This paper describes an analysis technique to reduce the cost in processing time of the storage management operations implied by a program (possibly to zero).", } @InCollection{Lester89, author = "D. Lester", title = "An Efficient Distributed Garbage Collector Algorithm", booktitle = "PARLE '89, Parallel Architectures and Languages Europe", publisher = "Springer-Verlag", address = "New York, NY", year = "1989", keywords = "functional", note = "Lecture Notes in Computer Science 365.", } @InCollection{Queinnec89, author = "C. Queinnec and B. Beaudoing and J-P. Queille", title = "Mark {DURING} Sweep rather than Mark {THEN} Sweep", booktitle = "PARLE '89, Parallel Architectures and Languages Europe", publisher = "Springer-Verlag", address = "New York, NY", year = "1989", keywords = "garbage collection functional", note = "Lecture Notes in Computer Science 365.", } @InProceedings{Sansom92, author = "P. Sansom", editor = "R. Heldal and C. K. Holst and P. L. Wadler", title = "Combining Single-Space and Two-Space Compacting Garbage Collectors", booktitle = "Functional Programming, Glasgow 1991: Proceedings of the 1991 Workshop, Portree, UK", pages = "312--323", publisher = "Springer-Verlag", address = "Berlin, DE", year = "1992", ISBN = "3-540-19760-5", abstract = "The garbage collector presented makes use of two well known compaction garbage collection algorithms with very different performance characteristics: C.J. Cheney's (1970) two-space copying collector and H.B.M. Jonker's (1979) single-space sliding compaction collector. The author proposes a scheme which allows either collector to be used. The run-time memory requirements of the program being executed are used to determine the most appropriate collector. This enables one to achieve a fast collector for heap requirements less than half of the heap memory but allows the heap utilization to increase beyond this threshold. Using these ideas the author develops a particularly attractive extension to A.W. Appel's (1989) generational collector.", }
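Several entries above (Bevan's distributed reference-counting article and the Glaser, Reeve and Wright report on Lazy/Weighted schemes) rest on weighted reference counting: each reference carries a weight, duplicating a reference splits its weight locally, and deleting one returns the weight to the object, which is reclaimed when its total weight reaches zero. The single-process C sketch below shows only that bookkeeping; indirection cells (needed when a weight of 1 must be split) and the message passing of the distributed setting are omitted, and all names are invented.

#include <stdio.h>
#include <stdlib.h>

#define INITIAL_WEIGHT 64            /* a power of two, so it splits evenly */

typedef struct Object {
    int weight;                      /* sum of the weights of live references */
    /* ... payload ... */
} Object;

typedef struct Ref {
    Object *target;
    int weight;
} Ref;

static Ref new_object(void)
{
    Object *o = malloc(sizeof *o);
    o->weight = INITIAL_WEIGHT;
    return (Ref){ o, INITIAL_WEIGHT };      /* the first reference holds it all */
}

static Ref duplicate(Ref *r)                /* no message to the object needed  */
{
    int half = r->weight / 2;               /* assumes r->weight > 1            */
    r->weight -= half;
    return (Ref){ r->target, half };
}

static void drop(Ref *r)                    /* return the weight to the object  */
{
    r->target->weight -= r->weight;
    if (r->target->weight == 0)             /* no references remain             */
        free(r->target);
    r->target = NULL;
}

int main(void)
{
    Ref a = new_object();
    Ref b = duplicate(&a);          /* share the object: 32 + 32 = 64          */
    drop(&a);
    drop(&b);                       /* weight reaches 0 here; object is freed  */
    printf("done\n");
    return 0;
}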
@TechReport{Bjoe89a, author = "Anders Bj{\"o}rnerstedt", editor = "D. Tsichritzis", title = "Secondary Storage Garbage Collection for Decentralized Object-Based Systems", institution = "Centre Universitaire d'Informatique, University of Geneva", type = "Object Oriented Development", pages = "277--319", month = jul, year = "1989", keywords = "olit osg-ftp ood89", abstract = "This paper describes a mechanism for secondary storage garbage collection that may be used to reclaim inaccessible resources in decentralized persistent object based systems. Schemes for object addressing and object identification are discussed and a proposal is made which handles volatile objects separately from persistent objects. The garbage collection of the space of volatile objects is decoupled from the garbage collection of the space of persistent objects. The first kind of garbage collection can avoid the complexity and overhead of a distributed algorithm by classifying {"}exported{"} objects as persistent. The problem of detecting and collecting {"}distributed garbage{"} is then deferred to garbage collection of persistent objects.", }
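As a rough illustration of the classification rule in the Bj{\"o}rnerstedt abstract above (exported objects become persistent, so the collector for volatile objects never needs a distributed algorithm), one might write something like the following; the types and functions are invented for this sketch and stand in for a real object manager.

#include <stdbool.h>

typedef enum { VOLATILE, PERSISTENT } Lifetime;

typedef struct Obj {
    Lifetime lifetime;
    /* ... payload, outgoing references ... */
} Obj;

/* Placeholder for writing the object into the persistent store. */
static void move_to_persistent_store(Obj *o) { o->lifetime = PERSISTENT; }

/* Called whenever a reference leaves the local volatile space, either to
 * another node or into persistent storage. */
static void export_reference(Obj *o)
{
    if (o->lifetime == VOLATILE)
        move_to_persistent_store(o);
    /* ... then transmit or store the (now persistent) identifier ... */
}

/* The local volatile-space collector can treat persistent objects as roots
 * and ignore remote sites entirely; distributed garbage is left to the
 * (separate, less frequent) persistent-space collector. */
static bool is_local_root(const Obj *o) { return o->lifetime == PERSISTENT; }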
@PhdThesis{rep:pan:alg:gc:683, author = "Rivka Ladin", title = "A Method for Constructing Highly Available Services and a Technique for Distributed Garbage Collection", school = "Massachusetts Institute of Technology", year = "1989", address = "Cambridge, MA ({USA})", month = may, }
@InProceedings{Weiser89, author = "Mark Weiser and Alan Demers and Carl Hauser", title = "The Portable Common Runtime approach to interoperability", booktitle = "Proceedings of the 12th ACM Symposium on Operating System Principles", conflocation = "Litchfield Park, AZ, 3--6 December 1989", journal = "Operating Systems Review", volume = "23", number = "5", date = "December 1989", pages = "114--122", key = "Weiser89", keywords = "Weiser89 structural issues, PCR, garbage collection, threads, IO incremental loader, symbol binding C, CommonLisp, Cedar, language runtime system", abstract = "Operating system abstractions do not always reach high enough for direct use by a language or applications designer. The gap is filled by language-specific runtime environments, which become more complex for richer languages (CommonLisp needs more than C++, which needs more than C). But language-specific environments inhibit integrated multi-lingual programming, and also make porting hard (for instance, because of operating system dependencies). To help solve these problems, we have built the Portable Common Runtime (PCR), a language-independent and operating-system-independent base for modern languages. PCR offers four interrelated facilities: storage management (including universal garbage collection), symbol binding (including static and dynamic linking and loading), threads (lightweight processes), and low-level I/O (including network sockets). PCR is ``common'' because these facilities simultaneously support programs in several languages. PCR supports C, Cedar, Scheme, and CommonLisp intercalling and runs pre-existing C and CommonLisp (Kyoto) binaries. PCR is ``portable'' because it uses only a small set of operating system features. The PCR source code is available for use by other researchers and developers.", }
@TechReport{MKN89, author = "N. Miyauchi and Y. Kawada and K. Nakajima", title = "{Tracing Garbage Collection for KL1 on the Multi-PSI/V2 System}", institution = "Institute for New Generation Computer Technology", address = "ICOT Research Center, Tokyo, Japan", month = mar, year = "1989", type = "Technical Report", number = "TR-469", owner = "pcl", descr = "pllog,pagc", }
@Article{AppBen89, author = "Andrew W. Appel and Aage Bendiksen", title = "Vectorized Garbage Collection", journal = "The Journal of Supercomputing", year = "1989", volume = "3", number = "3", month = sep, }
@TechReport{Cardelli89b, author = "Luca Cardelli and James Donahue and Lucille Glassman and Mick Jordan and Bill Kalsow and Greg Nelson", title = "Modula-3 Report (revised)", institution = "Digital Equipment Corporation, Systems Research Centre", number = "52", pages = "71 pages.", month = "1 " # nov, year = "1989", abstract = "The goal of Modula-3 is to be as simple and safe as it can be while meeting the needs of modern systems programmers. Instead of exploring new features, we studied the features from the Modula family of languages that have proven themselves in practice and tried to simplify them and fit them into a harmonious language. We found that most of the successful features were aimed at one of two main goals: greater robustness, and a simpler, more systematic type system. Modula-3 descends from Mesa, Modula-2, Cedar, and Modula-2+. It also resembles its cousins Object Pascal, Oberon, and Euclid. Modula-3 retains one of Modula-2's most successful features, the provision for explicit interfaces between modules. It adds objects and classes, exception handling, garbage collection, lightweight processes (or threads), and the isolation of unsafe features. The Modula-3 report was published by Olivetti and Digital in August 1988. Implementation efforts followed shortly at both companies. In January 1989, the committee revised the language to reflect the experiences of these implementation teams. The main changes were the introduction of branded reference types, the requirement that opaque types be branded, the legalization of opaque supertypes, and the new flexibility in revealing information about an opaque type.", }
@TechReport{Peng89, author = "Chih-Jui Peng and Gurindar S. Sohi", title = "Cache Memory Design Considerations to Support Languages with Dynamic Heap Allocation", institution = "Computer Sciences Department", number = "TR 860", address = "University of Wisconsin-Madison", month = jul, year = "1989", abstract = "In this paper, we consider the design of cache memories to support the execution of languages that make extensive use of a dynamic heap. To get insight into the cache memory design, we define several characteristics of dynamic heap references and measure these characteristics for several benchmark programs using Lisp as our model heap-intensive language. We make several observations about the heap referencing characteristics and study the implications of the referencing characteristics on cache memory design. From our observations, we conclude that conventional cache memories are likely to be inadequate in supporting dynamic heap references. We also verify this conclusion with an extensive trace-driven simulation analysis. Then we present some cache optimizations that exploit the peculiarities of heap references. These optimizations include: i) the use of an {\it ALLOCATE} operation to improve the cache miss ratio as well as the data traffic ratio, ii) the use of a {\it biased-LRU} replacement algorithm that discriminates against garbage lines and moves the miss ratio of a cache closer to that of an unrealizable optimal cache and iii) the use of a {\it garbage bit} with each cache line that eliminates unnecessary write back operations. Using trace-driven simulation, we conclude that with the heap-specific cache optimizations proposed, it is possible to design cache memories that have a miss ratio and a data traffic ratio that is close to 0. Without these optimizations, the miss ratio and data traffic ratio of a cache organization can be extremely poor, regardless of the cache size. Two of the proposed optimizations rely on a mechanism that detects garbage soon after it is created. Since cache memory performance without the proposed optimizations is very poor, we point out the need for garbage collection mechanisms that can detect garbage almost immediately after it is created and while the garbage heap cell is still resident in the cache.", }
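Of the three optimizations listed in the Peng and Sohi abstract above, the per-line garbage bit is the easiest to picture: if the memory manager learns that everything in a cache line became garbage while the line is still resident, the eventual eviction of that line need not be written back. The fragment below is a hypothetical, direct-mapped illustration of just that bit, not the paper's simulator; the ALLOCATE operation and biased-LRU policy are not shown.

#include <stdbool.h>
#include <stdint.h>

#define LINE_BYTES 64
#define NUM_LINES  256

typedef struct {
    uint64_t tag;          /* full block number, for simplicity            */
    bool valid;
    bool dirty;            /* modified since it was fetched                */
    bool garbage;          /* the whole line is known to hold only garbage */
} CacheLine;

static CacheLine cache[NUM_LINES];   /* direct-mapped, for simplicity */

/* Called by the memory manager when it detects that every word in the line
 * holding `addr` is garbage and that line may still be resident. */
static void mark_line_garbage(uint64_t addr)
{
    uint64_t block = addr / LINE_BYTES;
    CacheLine *line = &cache[block % NUM_LINES];
    if (line->valid && line->tag == block)
        line->garbage = true;
}

/* On eviction, a dirty line is normally written back to memory; if it only
 * holds garbage, the write-back (and its bus traffic) can be skipped. */
static bool needs_writeback(const CacheLine *line)
{
    return line->valid && line->dirty && !line->garbage;
}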
@TechReport{MIT/LCS/TR-467, author = "J. D. Morrison", title = "A Scalable Multiprocessor Architecture Using {Cartesian} Network-Relative Addressing", institution = "MIT Laboratory for Computer Science", number = "MIT/LCS/TR-467", pages = "125", month = dec, year = "1989", price = "USD 18.00", keywords = "multiprocessor, scalability, Cartesian, topology, address space, relative addressing, task migration, parallelism", abstract = "The Computer Architecture Group at the Laboratory for Computer Science is developing a new model of computation called . This thesis describes a highly scalable architecture for implementing called Cartesian Network-Relative Addressing (CNRA). In the CNRA architecture, processor/memory pairs are placed at the nodes of a low-dimensional Cartesian grid network. Addresses in the system are composed of a {"}routing{"} component which describes a relative path through the interconnection network (the origin of the path is the node on which the address resides), and a {"}memory location{"} component which specifies the memory location to be addressed on the node at the destination of the routing path. The CNRA addressing system allows sharing of data structures in a style similar to that of global shared memory machines, but does not have the disadvantages normally associated with shared-memory machines (i.e. limited address space and memory access latency that increases with system size). This thesis discusses how a practical CNRA system might be built. There are discussions on how the system software might manage the {"}relative pointers{"} in a clean, transparent way, solutions to the problem of testing pointer equivalence, protocols and algorithms for migrating objects to maximize concurrency and communication locality, garbage collection techniques, and other aspects of the CNRA system design. Simulation experiments with a toy program are presented, and the results seem encouraging.", }
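The CNRA abstract above defines an address as a routing component (a path through the grid, relative to the node holding the address) plus a memory offset on the destination node, and notes that testing pointer equivalence becomes a real problem. The C sketch below illustrates those two consequences for an assumed 2-D grid: an address must be rebased when it is copied between nodes, and two addresses are equal only if their paths end at the same place. All field and function names are invented for this sketch.

#include <stdint.h>

typedef struct {
    int16_t dx, dy;       /* relative path through the 2-D grid           */
    uint32_t offset;      /* memory location on the node the path reaches */
} CnraAddr;

typedef struct { int16_t x, y; } NodeId;   /* absolute grid coordinates */

/* Rebase an address when it is copied from node `from` to node `to`:
 * the path must now start at `to` but still end at the same node. */
static CnraAddr rebase(CnraAddr a, NodeId from, NodeId to)
{
    a.dx += from.x - to.x;
    a.dy += from.y - to.y;
    return a;             /* the offset on the target node is unchanged */
}

/* Two addresses held on (possibly different) nodes denote the same object
 * exactly when they resolve to the same absolute node and offset. */
static int same_object(CnraAddr a, NodeId at_a, CnraAddr b, NodeId at_b)
{
    return at_a.x + a.dx == at_b.x + b.dx &&
           at_a.y + a.dy == at_b.y + b.dy &&
           a.offset == b.offset;
}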
@InProceedings{Lester89, author = "Lester", title = "An Efficient Distributed Garbage Collection Algorithm", booktitle = "Parallel Architectures and Languages Europe", publisher = "LNCS, Springer-Verlag", year = "1989", }
Found 39 references in 21 bibliographies.