3 // Copyright (c) Microsoft Corporation. All rights reserved.
6 /*============================================================
10 ** <OWNER>Microsoft</OWNER>
13 ** Purpose: Hash table implementation
16 ===========================================================*/
18 namespace System.Collections {
21 using System.Runtime.Serialization;
22 using System.Security.Permissions;
23 using System.Diagnostics;
24 using System.Threading;
25 using System.Runtime.CompilerServices;
26 using System.Runtime.ConstrainedExecution;
27 using System.Diagnostics.Contracts;
28 using System.Security.Cryptography;
30 // The Hashtable class represents a dictionary of associated keys and values
31 // with constant lookup time.
33 // Objects used as keys in a hashtable must implement the GetHashCode
34 // and Equals methods (or they can rely on the default implementations
35 // inherited from Object if key equality is simply reference
36 // equality). Furthermore, the GetHashCode and Equals methods of
37 // a key object must produce the same results given the same parameters for the
38 // entire time the key is present in the hashtable. In practical terms, this
39 // means that key objects should be immutable, at least for the time they are
40 // used as keys in a hashtable.
42 // When entries are added to a hashtable, they are placed into
43 // buckets based on the hashcode of their keys. Subsequent lookups of
44 // keys will use the hashcode of the keys to only search a particular bucket,
45 // thus substantially reducing the number of key comparisons required to find
46 // an entry. A hashtable's maximum load factor, which can be specified
47 // when the hashtable is instantiated, determines the maximum ratio of
48 // hashtable entries to hashtable buckets. Smaller load factors cause faster
49 // average lookup times at the cost of increased memory consumption. The
50 // default maximum load factor of 1.0 generally provides the best balance
51 // between speed and size. As entries are added to a hashtable, the hashtable's
52 // actual load factor increases, and when the actual load factor reaches the
53 // maximum load factor value, the number of buckets in the hashtable is
54 // automatically increased by approximately a factor of two (to be precise, the
55 // number of hashtable buckets is increased to the smallest prime number that
56 // is larger than twice the current number of hashtable buckets).
58 // Each object provides their own hash function, accessed by calling
59 // GetHashCode(). However, one can write their own object
60 // implementing IEqualityComparer and pass it to a constructor on
61 // the Hashtable. That hash function (and the equals method on the
62 // IEqualityComparer) would be used for all objects in the table.
64 // Changes since V1 during Whidbey:
65 // *) Deprecated IHashCodeProvider, use IEqualityComparer instead. This will
66 // allow better performance for objects where equality checking can be
67 // done much faster than establishing an ordering between two objects,
68 // such as an ordinal string equality check.
70 [DebuggerTypeProxy(typeof(System.Collections.Hashtable.HashtableDebugView))]
71 [DebuggerDisplay("Count = {Count}")]
72 [System.Runtime.InteropServices.ComVisible(true)]
74 public class Hashtable : IDictionary, ISerializable, IDeserializationCallback, ICloneable {
77 The generic Dictionary was copied from Hashtable's source - any bug
78 fixes here probably need to be made to the generic Dictionary as well.
80 This Hashtable uses double hashing. There are hashsize buckets in the
table, and each bucket can contain 0 or 1 element. We use a bit to mark
82 whether there's been a collision when we inserted multiple elements
83 (ie, an inserted item was hashed at least a second time and we probed
84 this bucket, but it was already in use). Using the collision bit, we
85 can terminate lookups & removes for elements that aren't in the hash
86 table more quickly. We steal the most significant bit from the hash code
87 to store the collision bit.
89 Our hash function is of the following form:
91 h(key, n) = h1(key) + n*h2(key)
93 where n is the number of times we've hit a collided bucket and rehashed
94 (on this particular lookup). Here are our hash functions:
96 h1(key) = GetHash(key); // default implementation calls key.GetHashCode();
97 h2(key) = 1 + (((h1(key) >> 5) + 1) % (hashsize - 1));
99 The h1 can return any number. h2 must return a number between 1 and
100 hashsize - 1 that is relatively prime to hashsize (not a problem if
101 hashsize is prime). (Knuth's Art of Computer Programming, Vol. 3, p. 528-9)
102 If this is true, then we are guaranteed to visit every bucket in exactly
103 hashsize probes, since the least common multiple of hashsize and h2(key)
104 will be hashsize * h2(key). (This is the first number where adding h2 to
105 h1 mod hashsize will be 0 and we will search the same bucket twice).
107 We previously used a different h2(key, n) that was not constant. That is a
108 horrifically bad idea, unless you can prove that series will never produce
109 any identical numbers that overlap when you mod them by hashsize, for all
110 subranges from i to i+hashsize, for all i. It's not worth investigating,
111 since there was no clear benefit from using that hash function, and it was
114 For efficiency reasons, we've implemented this by storing h1 and h2 in a
115 temporary, and setting a variable called seed equal to h1. We do a probe,
116 and if we collided, we simply add h2 to seed each time through the loop.
118 A good test for h2() is to subclass Hashtable, provide your own implementation
119 of GetHash() that returns a constant, then add many items to the hash table.
120 Make sure Count equals the number of items you inserted.
122 Note that when we remove an item from the hash table, we set the key
123 equal to buckets, if there was a collision in this bucket. Otherwise
124 we'd either wipe out the collision bit, or we'd still have an item in
130 internal const Int32 HashPrime = 101;
131 private const Int32 InitialSize = 3;
132 private const String LoadFactorName = "LoadFactor";
133 private const String VersionName = "Version";
134 private const String ComparerName = "Comparer";
135 private const String HashCodeProviderName = "HashCodeProvider";
136 private const String HashSizeName = "HashSize"; // Must save buckets.Length
137 private const String KeysName = "Keys";
138 private const String ValuesName = "Values";
139 private const String KeyComparerName = "KeyComparer";
141 // Deleted entries have their key set to buckets
143 // The hash table data.
144 // This cannot be serialised
145 private struct bucket {
148 public int hash_coll; // Store hash code; sign bit means there was a collision.
151 private bucket[] buckets;
153 // The total number of entries in the hash table.
156 // The total number of collision bits set in the hashtable
157 private int occupancy;
159 private int loadsize;
160 private float loadFactor;
162 private volatile int version;
163 private volatile bool isWriterInProgress;
165 private ICollection keys;
166 private ICollection values;
168 private IEqualityComparer _keycomparer;
169 private Object _syncRoot;
171 [Obsolete("Please use EqualityComparer property.")]
172 protected IHashCodeProvider hcp
176 if( _keycomparer is CompatibleComparer) {
177 return ((CompatibleComparer)_keycomparer).HashCodeProvider;
179 else if( _keycomparer == null) {
183 throw new ArgumentException(Environment.GetResourceString("Arg_CannotMixComparisonInfrastructure"));
188 if (_keycomparer is CompatibleComparer) {
189 CompatibleComparer keyComparer = (CompatibleComparer)_keycomparer;
190 _keycomparer = new CompatibleComparer(keyComparer.Comparer, value);
192 else if( _keycomparer == null) {
193 _keycomparer = new CompatibleComparer((IComparer)null, value);
196 throw new ArgumentException(Environment.GetResourceString("Arg_CannotMixComparisonInfrastructure"));
202 [Obsolete("Please use KeyComparer properties.")]
203 protected IComparer comparer
207 if( _keycomparer is CompatibleComparer) {
208 return ((CompatibleComparer)_keycomparer).Comparer;
210 else if( _keycomparer == null) {
214 throw new ArgumentException(Environment.GetResourceString("Arg_CannotMixComparisonInfrastructure"));
219 if (_keycomparer is CompatibleComparer) {
220 CompatibleComparer keyComparer = (CompatibleComparer)_keycomparer;
221 _keycomparer = new CompatibleComparer(value, keyComparer.HashCodeProvider);
223 else if( _keycomparer == null) {
224 _keycomparer = new CompatibleComparer(value, (IHashCodeProvider)null);
227 throw new ArgumentException(Environment.GetResourceString("Arg_CannotMixComparisonInfrastructure"));
232 protected IEqualityComparer EqualityComparer
240 // Note: this constructor is a bogus constructor that does nothing
241 // and is for use only with SyncHashtable.
242 internal Hashtable( bool trash )
246 // Constructs a new hashtable. The hashtable is created with an initial
247 // capacity of zero and a load factor of 1.0.
248 public Hashtable() : this(0, 1.0f) {
251 // Constructs a new hashtable with the given initial capacity and a load
252 // factor of 1.0. The capacity argument serves as an indication of
253 // the number of entries the hashtable will contain. When this number (or
254 // an approximation) is known, specifying it in the constructor can
255 // eliminate a number of resizing operations that would otherwise be
256 // performed when elements are added to the hashtable.
258 public Hashtable(int capacity) : this(capacity, 1.0f) {
261 // Constructs a new hashtable with the given initial capacity and load
262 // factor. The capacity argument serves as an indication of the
263 // number of entries the hashtable will contain. When this number (or an
264 // approximation) is known, specifying it in the constructor can eliminate
265 // a number of resizing operations that would otherwise be performed when
266 // elements are added to the hashtable. The loadFactor argument
267 // indicates the maximum ratio of hashtable entries to hashtable buckets.
268 // Smaller load factors cause faster average lookup times at the cost of
269 // increased memory consumption. A load factor of 1.0 generally provides
270 // the best balance between speed and size.
272 public Hashtable(int capacity, float loadFactor) {
274 throw new ArgumentOutOfRangeException("capacity", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
275 if (!(loadFactor >= 0.1f && loadFactor <= 1.0f))
276 throw new ArgumentOutOfRangeException("loadFactor", Environment.GetResourceString("ArgumentOutOfRange_HashtableLoadFactor", .1, 1.0));
277 Contract.EndContractBlock();
279 // Based on perf work, .72 is the optimal load factor for this table.
280 this.loadFactor = 0.72f * loadFactor;
282 double rawsize = capacity / this.loadFactor;
283 if (rawsize > Int32.MaxValue)
284 throw new ArgumentException(Environment.GetResourceString("Arg_HTCapacityOverflow"));
286 // Avoid awfully small sizes
287 int hashsize = (rawsize > InitialSize) ? HashHelpers.GetPrime((int)rawsize) : InitialSize;
288 buckets = new bucket[hashsize];
290 loadsize = (int)(this.loadFactor * hashsize);
291 isWriterInProgress = false;
292 // Based on the current algorithm, loadsize must be less than hashsize.
293 Contract.Assert( loadsize < hashsize, "Invalid hashtable loadsize!");
296 // Constructs a new hashtable with the given initial capacity and load
297 // factor. The capacity argument serves as an indication of the
298 // number of entries the hashtable will contain. When this number (or an
299 // approximation) is known, specifying it in the constructor can eliminate
300 // a number of resizing operations that would otherwise be performed when
301 // elements are added to the hashtable. The loadFactor argument
302 // indicates the maximum ratio of hashtable entries to hashtable buckets.
303 // Smaller load factors cause faster average lookup times at the cost of
304 // increased memory consumption. A load factor of 1.0 generally provides
305 // the best balance between speed and size. The hcp argument
306 // is used to specify an Object that will provide hash codes for all
307 // the Objects in the table. Using this, you can in effect override
308 // GetHashCode() on each Object using your own hash function. The
309 // comparer argument will let you specify a custom function for
310 // comparing keys. By specifying user-defined objects for hcp
311 // and comparer, users could make a hash table using strings
312 // as keys do case-insensitive lookups.
314 [Obsolete("Please use Hashtable(int, float, IEqualityComparer) instead.")]
315 public Hashtable(int capacity, float loadFactor, IHashCodeProvider hcp, IComparer comparer) : this(capacity, loadFactor) {
316 if (hcp == null && comparer == null) {
317 this._keycomparer = null;
320 this._keycomparer = new CompatibleComparer(comparer,hcp);
324 public Hashtable(int capacity, float loadFactor, IEqualityComparer equalityComparer) : this(capacity, loadFactor) {
325 this._keycomparer = equalityComparer;
328 // Constructs a new hashtable using a custom hash function
329 // and a custom comparison function for keys. This will enable scenarios
330 // such as doing lookups with case-insensitive strings.
332 [Obsolete("Please use Hashtable(IEqualityComparer) instead.")]
333 public Hashtable(IHashCodeProvider hcp, IComparer comparer) : this(0, 1.0f, hcp, comparer) {
336 public Hashtable(IEqualityComparer equalityComparer) : this(0, 1.0f, equalityComparer) {
339 // Constructs a new hashtable using a custom hash function
340 // and a custom comparison function for keys. This will enable scenarios
341 // such as doing lookups with case-insensitive strings.
343 [Obsolete("Please use Hashtable(int, IEqualityComparer) instead.")]
344 public Hashtable(int capacity, IHashCodeProvider hcp, IComparer comparer)
345 : this(capacity, 1.0f, hcp, comparer) {
348 public Hashtable(int capacity, IEqualityComparer equalityComparer)
349 : this(capacity, 1.0f, equalityComparer) {
352 // Constructs a new hashtable containing a copy of the entries in the given
353 // dictionary. The hashtable is created with a load factor of 1.0.
355 public Hashtable(IDictionary d) : this(d, 1.0f) {
358 // Constructs a new hashtable containing a copy of the entries in the given
359 // dictionary. The hashtable is created with the given load factor.
361 public Hashtable(IDictionary d, float loadFactor)
362 : this(d, loadFactor, (IEqualityComparer)null) {
365 [Obsolete("Please use Hashtable(IDictionary, IEqualityComparer) instead.")]
366 public Hashtable(IDictionary d, IHashCodeProvider hcp, IComparer comparer)
367 : this(d, 1.0f, hcp, comparer) {
370 public Hashtable(IDictionary d, IEqualityComparer equalityComparer)
371 : this(d, 1.0f, equalityComparer) {
374 [Obsolete("Please use Hashtable(IDictionary, float, IEqualityComparer) instead.")]
375 public Hashtable(IDictionary d, float loadFactor, IHashCodeProvider hcp, IComparer comparer)
376 : this((d != null ? d.Count : 0), loadFactor, hcp, comparer) {
378 throw new ArgumentNullException("d", Environment.GetResourceString("ArgumentNull_Dictionary"));
379 Contract.EndContractBlock();
381 IDictionaryEnumerator e = d.GetEnumerator();
382 while (e.MoveNext()) Add(e.Key, e.Value);
385 public Hashtable(IDictionary d, float loadFactor, IEqualityComparer equalityComparer)
386 : this((d != null ? d.Count : 0), loadFactor, equalityComparer) {
388 throw new ArgumentNullException("d", Environment.GetResourceString("ArgumentNull_Dictionary"));
389 Contract.EndContractBlock();
391 IDictionaryEnumerator e = d.GetEnumerator();
392 while (e.MoveNext()) Add(e.Key, e.Value);
395 protected Hashtable(SerializationInfo info, StreamingContext context) {
396 //We can't do anything with the keys and values until the entire graph has been deserialized
397 //and we have a reasonable estimate that GetHashCode is not going to fail. For the time being,
398 //we'll just cache this. The graph is not valid until OnDeserialization has been called.
399 HashHelpers.SerializationInfoTable.Add(this, info);
402 //
// 'InitHash' is basically an implementation of classic DoubleHashing (see http://en.wikipedia.org/wiki/Double_hashing)
//
// 1) The only 'correctness' requirement is that the 'increment' used to probe
//    a. Be non-zero
//    b. Be relatively prime to the table size 'hashSize'. (This is needed to insure you probe all entries in the table before you 'wrap' and visit entries already probed)
407 // 2) Because we choose table sizes to be primes, we just need to insure that the increment is 0 < incr < hashSize
409 // Thus this function would work: Incr = 1 + (seed % (hashSize-1))
411 // While this works well for
\91uniformly distributed
\92 keys, in practice, non-uniformity is common.
412 // In particular in practice we can see
\91mostly sequential
\92 where you get long clusters of keys that
\91pack
\92.
413 // To avoid bad behavior you want it to be the case that the increment is
\91large
\92 even for
\91small
\92 values (because small
414 // values tend to happen more in practice). Thus we multiply
\91seed
\92 by a number that will make these small values
415 // bigger (and not hurt large values). We picked HashPrime (101) because it was prime, and if
\91hashSize-1
\92 is not a multiple of HashPrime
416 // (enforced in GetPrime), then incr has the potential of being every value from 1 to hashSize-1. The choice was largely arbitrary.
418 // Computes the hash function: H(key, i) = h1(key) + i*h2(key, hashSize).
419 // The out parameter seed is h1(key), while the out parameter
420 // incr is h2(key, hashSize). Callers of this function should
421 // add incr each time through a loop.
422 private uint InitHash(Object key, int hashsize, out uint seed, out uint incr) {
423 // Hashcode must be positive. Also, we must not use the sign bit, since
424 // that is used for the collision bit.
425 uint hashcode = (uint) GetHash(key) & 0x7FFFFFFF;
426 seed = (uint) hashcode;
427 // Restriction: incr MUST be between 1 and hashsize - 1, inclusive for
428 // the modular arithmetic to work correctly. This guarantees you'll
429 // visit every bucket in the table exactly once within hashsize
430 // iterations. Violate this and it'll cause obscure bugs forever.
431 // If you change this calculation for h2(key), update putEntry too!
432 incr = (uint)(1 + ((seed * HashPrime) % ((uint)hashsize - 1)));
436 // Adds an entry with the given key and value to this hashtable. An
437 // ArgumentException is thrown if the key is null or if the key is already
438 // present in the hashtable.
440 public virtual void Add(Object key, Object value) {
441 Insert(key, value, true);
444 // Removes all entries from this hashtable.
445 [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
446 public virtual void Clear() {
447 Contract.Assert(!isWriterInProgress, "Race condition detected in usages of Hashtable - multiple threads appear to be writing to a Hashtable instance simultaneously! Don't do that - use Hashtable.Synchronized.");
449 if (count == 0 && occupancy == 0)
453 Thread.BeginCriticalRegion();
455 isWriterInProgress = true;
456 for (int i = 0; i < buckets.Length; i++){
457 buckets[i].hash_coll = 0;
458 buckets[i].key = null;
459 buckets[i].val = null;
465 isWriterInProgress = false;
467 Thread.EndCriticalRegion();
471 // Clone returns a virtually identical copy of this hash table. This does
472 // a shallow copy - the Objects in the table aren't cloned, only the references
474 public virtual Object Clone()
476 bucket[] lbuckets = buckets;
477 Hashtable ht = new Hashtable(count,_keycomparer);
478 ht.version = version;
479 ht.loadFactor = loadFactor;
482 int bucket = lbuckets.Length;
485 Object keyv = lbuckets[bucket].key;
486 if ((keyv!= null) && (keyv != lbuckets)) {
487 ht[keyv] = lbuckets[bucket].val;
494 // Checks if this hashtable contains the given key.
495 public virtual bool Contains(Object key) {
496 return ContainsKey(key);
499 // Checks if this hashtable contains an entry with the given key. This is
500 // an O(1) operation.
502 public virtual bool ContainsKey(Object key) {
504 throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
506 Contract.EndContractBlock();
510 // Take a snapshot of buckets, in case another thread resizes table
511 bucket[] lbuckets = buckets;
512 uint hashcode = InitHash(key, lbuckets.Length, out seed, out incr);
516 int bucketNumber = (int) (seed % (uint)lbuckets.Length);
518 b = lbuckets[bucketNumber];
522 if (((b.hash_coll & 0x7FFFFFFF) == hashcode) &&
523 KeyEquals (b.key, key))
525 bucketNumber = (int) (((long)bucketNumber + incr)% (uint)lbuckets.Length);
526 } while (b.hash_coll < 0 && ++ntry < lbuckets.Length);
532 // Checks if this hashtable contains an entry with the given value. The
533 // values of the entries of the hashtable are compared to the given value
534 // using the Object.Equals method. This method performs a linear
// search and is thus substantially slower than the ContainsKey
538 public virtual bool ContainsValue(Object value) {
541 for (int i = buckets.Length; --i >= 0;) {
542 if (buckets[i].key != null && buckets[i].key != buckets && buckets[i].val == null)
547 for (int i = buckets.Length; --i >= 0;) {
548 Object val = buckets[i].val;
549 if (val!=null && val.Equals(value)) return true;
555 // Copies the keys of this hashtable to a given array starting at a given
556 // index. This method is used by the implementation of the CopyTo method in
557 // the KeyCollection class.
558 private void CopyKeys(Array array, int arrayIndex) {
559 Contract.Requires(array != null);
560 Contract.Requires(array.Rank == 1);
562 bucket[] lbuckets = buckets;
563 for (int i = lbuckets.Length; --i >= 0;) {
564 Object keyv = lbuckets[i].key;
565 if ((keyv != null) && (keyv != buckets)){
566 array.SetValue(keyv, arrayIndex++);
// Copies the entries of this hashtable to a given array starting at a given
// index, as DictionaryEntry values. This method is used by the implementation
// of the CopyTo method of the Hashtable itself.
574 private void CopyEntries(Array array, int arrayIndex) {
575 Contract.Requires(array != null);
576 Contract.Requires(array.Rank == 1);
578 bucket[] lbuckets = buckets;
579 for (int i = lbuckets.Length; --i >= 0;) {
580 Object keyv = lbuckets[i].key;
581 if ((keyv != null) && (keyv != buckets)){
582 DictionaryEntry entry = new DictionaryEntry(keyv,lbuckets[i].val);
583 array.SetValue(entry, arrayIndex++);
588 // Copies the values in this hash table to an array at
589 // a given index. Note that this only copies values, and not keys.
590 public virtual void CopyTo(Array array, int arrayIndex)
593 throw new ArgumentNullException("array", Environment.GetResourceString("ArgumentNull_Array"));
595 throw new ArgumentException(Environment.GetResourceString("Arg_RankMultiDimNotSupported"));
597 throw new ArgumentOutOfRangeException("arrayIndex", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
598 if (array.Length - arrayIndex < Count)
599 throw new ArgumentException(Environment.GetResourceString("Arg_ArrayPlusOffTooSmall"));
600 Contract.EndContractBlock();
601 CopyEntries(array, arrayIndex);
604 // Copies the values in this Hashtable to an KeyValuePairs array.
605 // KeyValuePairs is different from Dictionary Entry in that it has special
606 // debugger attributes on its fields.
608 internal virtual KeyValuePairs[] ToKeyValuePairsArray() {
610 KeyValuePairs[] array = new KeyValuePairs[count];
612 bucket[] lbuckets = buckets;
613 for (int i = lbuckets.Length; --i >= 0;) {
614 Object keyv = lbuckets[i].key;
615 if ((keyv != null) && (keyv != buckets)){
616 array[index++] = new KeyValuePairs(keyv,lbuckets[i].val);
624 // Copies the values of this hashtable to a given array starting at a given
625 // index. This method is used by the implementation of the CopyTo method in
626 // the ValueCollection class.
627 private void CopyValues(Array array, int arrayIndex) {
628 Contract.Requires(array != null);
629 Contract.Requires(array.Rank == 1);
631 bucket[] lbuckets = buckets;
632 for (int i = lbuckets.Length; --i >= 0;) {
633 Object keyv = lbuckets[i].key;
634 if ((keyv != null) && (keyv != buckets)){
635 array.SetValue(lbuckets[i].val, arrayIndex++);
640 // Returns the value associated with the given key. If an entry with the
641 // given key is not found, the returned value is null.
643 public virtual Object this[Object key] {
646 throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
648 Contract.EndContractBlock();
654 // Take a snapshot of buckets, in case another thread does a resize
655 bucket[] lbuckets = buckets;
656 uint hashcode = InitHash(key, lbuckets.Length, out seed, out incr);
660 int bucketNumber = (int) (seed % (uint)lbuckets.Length);
665 // A read operation on hashtable has three steps:
666 // (1) calculate the hash and find the slot number.
667 // (2) compare the hashcode, if equal, go to step 3. Otherwise end.
668 // (3) compare the key, if equal, go to step 4. Otherwise end.
669 // (4) return the value contained in the bucket.
670 // After step 3 and before step 4. A writer can kick in a remove the old item and add a new one
671 // in the same bukcet. So in the reader we need to check if the hash table is modified during above steps.
673 // Writers (Insert, Remove, Clear) will set 'isWriterInProgress' flag before it starts modifying
674 // the hashtable and will ckear the flag when it is done. When the flag is cleared, the 'version'
675 // will be increased. We will repeat the reading if a writer is in progress or done with the modification
678 // Our memory model guarantee if we pick up the change in bucket from another processor,
679 // we will see the 'isWriterProgress' flag to be true or 'version' is changed in the reader.
683 // this is violate read, following memory accesses can not be moved ahead of it.
684 currentversion = version;
685 b = lbuckets[bucketNumber];
687 // The contention between reader and writer shouldn't happen frequently.
688 // But just in case this will burn CPU, yield the control of CPU if we spinned a few times.
689 // 8 is just a random number I pick.
690 if( (++spinCount) % 8 == 0 ) {
691 Thread.Sleep(1); // 1 means we are yeilding control to all threads, including low-priority ones.
693 } while ( isWriterInProgress || (currentversion != version) );
698 if (((b.hash_coll & 0x7FFFFFFF) == hashcode) &&
699 KeyEquals (b.key, key))
701 bucketNumber = (int) (((long)bucketNumber + incr)% (uint)lbuckets.Length);
702 } while (b.hash_coll < 0 && ++ntry < lbuckets.Length);
707 Insert(key, value, false);
711 // Increases the bucket count of this hashtable. This method is called from
712 // the Insert method when the actual load factor of the hashtable reaches
713 // the upper limit specified when the hashtable was constructed. The number
714 // of buckets in the hashtable is increased to the smallest prime number
715 // that is larger than twice the current number of buckets, and the entries
716 // in the hashtable are redistributed into the new buckets using the cached
718 private void expand() {
719 int rawsize = HashHelpers.ExpandPrime(buckets.Length);
720 rehash(rawsize, false);
723 // We occationally need to rehash the table to clean up the collision bits.
724 private void rehash() {
725 rehash( buckets.Length, false );
728 private void UpdateVersion() {
729 // Version might become negative when version is Int32.MaxValue, but the oddity will be still be correct.
730 // So we don't need to special case this.
734 [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
735 private void rehash( int newsize, bool forceNewHashCode ) {
740 // Don't replace any internal state until we've finished adding to the
741 // new bucket[]. This serves two purposes:
742 // 1) Allow concurrent readers to see valid hashtable contents
744 // 2) Protect against an OutOfMemoryException while allocating this
746 bucket[] newBuckets = new bucket[newsize];
748 // rehash table into new buckets
750 for (nb = 0; nb < buckets.Length; nb++){
751 bucket oldb = buckets[nb];
752 if ((oldb.key != null) && (oldb.key != buckets)) {
753 int hashcode = ((forceNewHashCode ? GetHash(oldb.key) : oldb.hash_coll) & 0x7FFFFFFF);
754 putEntry(newBuckets, oldb.key, oldb.val, hashcode);
758 // New bucket[] is good to go - replace buckets and other internal state.
760 Thread.BeginCriticalRegion();
762 isWriterInProgress = true;
763 buckets = newBuckets;
764 loadsize = (int)(loadFactor * newsize);
766 isWriterInProgress = false;
768 Thread.EndCriticalRegion();
770 // minimun size of hashtable is 3 now and maximum loadFactor is 0.72 now.
771 Contract.Assert(loadsize < newsize, "Our current implementaion means this is not possible.");
775 // Returns an enumerator for this hashtable.
776 // If modifications made to the hashtable while an enumeration is
777 // in progress, the MoveNext and Current methods of the
778 // enumerator will throw an exception.
780 IEnumerator IEnumerable.GetEnumerator() {
781 return new HashtableEnumerator(this, HashtableEnumerator.DictEntry);
784 // Returns a dictionary enumerator for this hashtable.
785 // If modifications made to the hashtable while an enumeration is
786 // in progress, the MoveNext and Current methods of the
787 // enumerator will throw an exception.
789 public virtual IDictionaryEnumerator GetEnumerator() {
790 return new HashtableEnumerator(this, HashtableEnumerator.DictEntry);
793 // Internal method to get the hash code for an Object. This will call
794 // GetHashCode() on each object if you haven't provided an IHashCodeProvider
795 // instance. Otherwise, it calls hcp.GetHashCode(obj).
796 protected virtual int GetHash(Object key)
798 if (_keycomparer != null)
799 return _keycomparer.GetHashCode(key);
800 return key.GetHashCode();
803 // Is this Hashtable read-only?
804 public virtual bool IsReadOnly {
805 get { return false; }
808 public virtual bool IsFixedSize {
809 get { return false; }
812 // Is this Hashtable synchronized? See SyncRoot property
813 public virtual bool IsSynchronized {
814 get { return false; }
817 // Internal method to compare two keys. If you have provided an IComparer
818 // instance in the constructor, this method will call comparer.Compare(item, key).
819 // Otherwise, it will call item.Equals(key).
821 protected virtual bool KeyEquals(Object item, Object key)
823 Contract.Assert(key != null, "key can't be null here!");
824 if( Object.ReferenceEquals(buckets, item)) {
828 if (Object.ReferenceEquals(item,key))
831 if (_keycomparer != null)
832 return _keycomparer.Equals(item, key);
833 return item == null ? false : item.Equals(key);
836 // Returns a collection representing the keys of this hashtable. The order
837 // in which the returned collection represents the keys is unspecified, but
// it is guaranteed to be the same order in which a collection returned by
839 // GetValues represents the values of the hashtable.
841 // The returned collection is live in the sense that any changes
842 // to the hash table are reflected in this collection. It is not
843 // a static copy of all the keys in the hash table.
845 public virtual ICollection Keys {
847 if (keys == null) keys = new KeyCollection(this);
852 // Returns a collection representing the values of this hashtable. The
853 // order in which the returned collection represents the values is
854 // unspecified, but it is guaranteed to be the same order in which a
855 // collection returned by GetKeys represents the keys of the
858 // The returned collection is live in the sense that any changes
859 // to the hash table are reflected in this collection. It is not
860 // a static copy of all the keys in the hash table.
862 public virtual ICollection Values {
864 if (values == null) values = new ValueCollection(this);
// Inserts an entry into this hashtable. This method is called from the Set
// and Add methods. If the add parameter is true and the given key already
// exists in the hashtable, an ArgumentException is thrown.
//
// Probing uses double hashing: seed selects the initial bucket, incr is the
// step (see InitHash). The high bit of hash_coll is the "collision" flag;
// a key equal to the buckets array itself is a deleted-entry tombstone.
[ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
private void Insert (Object key, Object nvalue, bool add) {
    if (key == null) {
        throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
    }
    Contract.EndContractBlock();
    // Grow when the load factor is exceeded; rehash in place when too many
    // tombstones/collision bits have accumulated (occupancy) in a table of
    // non-trivial size.
    if (count >= loadsize) {
        expand();
    }
    else if(occupancy > loadsize && count > 100) {
        rehash();
    }

    uint seed;
    uint incr;
    // Assume we only have one thread writing concurrently. Modify
    // buckets to contain new data, as long as we insert in the right order.
    uint hashcode = InitHash(key, buckets.Length, out seed, out incr);
    int ntry = 0;
    int emptySlotNumber = -1; // We use the empty slot number to cache the first empty slot. We chose to reuse slots
    // create by remove that have the collision bit set over using up new slots.
    int bucketNumber = (int) (seed % (uint)buckets.Length);
    do {
        // Set emptySlot number to current bucket if it is the first available bucket that we have seen
        // that once contained an entry and also has had a collision.
        // We need to search this entire collision chain because we have to ensure that there are no
        // duplicate entries in the table.
        if (emptySlotNumber == -1 && (buckets[bucketNumber].key == buckets) && (buckets[bucketNumber].hash_coll < 0))//(((buckets[bucketNumber].hash_coll & unchecked(0x80000000))!=0)))
            emptySlotNumber = bucketNumber;

        // Insert the key/value pair into this bucket if this bucket is empty and has never contained an entry
        // OR
        // This bucket once contained an entry but there has never been a collision
        if ((buckets[bucketNumber].key == null) ||
            (buckets[bucketNumber].key == buckets && ((buckets[bucketNumber].hash_coll & unchecked(0x80000000))==0))) {

            // If we have found an available bucket that has never had a collision, but we've seen an available
            // bucket in the past that has the collision bit set, use the previous bucket instead
            if (emptySlotNumber != -1) // Reuse slot
                bucketNumber = emptySlotNumber;

            // We pretty much have to insert in this order. Don't set hash
            // code until the value & key are set appropriately.
#if !FEATURE_CORECLR
            Thread.BeginCriticalRegion();
#endif
            isWriterInProgress = true;
            buckets[bucketNumber].val = nvalue;
            buckets[bucketNumber].key = key;
            buckets[bucketNumber].hash_coll |= (int) hashcode;
            count++;
            version++;
            isWriterInProgress = false;
#if !FEATURE_CORECLR
            Thread.EndCriticalRegion();
#endif

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            // coreclr has the randomized string hashing on by default so we don't need to resize at this point
            if(ntry > HashHelpers.HashCollisionThreshold && HashHelpers.IsWellKnownEqualityComparer(_keycomparer))
            {
                // PERF: We don't want to rehash if _keycomparer is already a RandomizedObjectEqualityComparer since in some
                // cases there may not be any strings in the hashtable and we wouldn't get any mixing.
                if(_keycomparer == null || !(_keycomparer is System.Collections.Generic.RandomizedObjectEqualityComparer))
                {
                    _keycomparer = HashHelpers.GetRandomizedEqualityComparer(_keycomparer);
                    rehash(buckets.Length, true);
                }
            }
#endif // !FEATURE_CORECLR
#endif // FEATURE_RANDOMIZED_STRING_HASHING
            return;
        }

        // The current bucket is in use
        // OR
        // it is available, but has had the collision bit set and we have already found an available bucket
        if (((buckets[bucketNumber].hash_coll & 0x7FFFFFFF) == hashcode) &&
            KeyEquals (buckets[bucketNumber].key, key)) {
            if (add) {
                throw new ArgumentException(Environment.GetResourceString("Argument_AddingDuplicate__", buckets[bucketNumber].key, key));
            }
#if !FEATURE_CORECLR
            Thread.BeginCriticalRegion();
#endif
            isWriterInProgress = true;
            buckets[bucketNumber].val = nvalue;
            version++;
            isWriterInProgress = false;
#if !FEATURE_CORECLR
            Thread.EndCriticalRegion();
#endif

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            if(ntry > HashHelpers.HashCollisionThreshold && HashHelpers.IsWellKnownEqualityComparer(_keycomparer))
            {
                // PERF: We don't want to rehash if _keycomparer is already a RandomizedObjectEqualityComparer since in some
                // cases there may not be any strings in the hashtable and we wouldn't get any mixing.
                if(_keycomparer == null || !(_keycomparer is System.Collections.Generic.RandomizedObjectEqualityComparer))
                {
                    _keycomparer = HashHelpers.GetRandomizedEqualityComparer(_keycomparer);
                    rehash(buckets.Length, true);
                }
            }
#endif // !FEATURE_CORECLR
#endif // FEATURE_RANDOMIZED_STRING_HASHING
            return;
        }

        // The current bucket is full, and we have therefore collided. We need to set the collision bit
        // UNLESS
        // we have remembered an available slot previously.
        if (emptySlotNumber == -1) {// We don't need to set the collision bit here since we already have an empty slot
            if( buckets[bucketNumber].hash_coll >= 0 ) {
                buckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
                occupancy++;
            }
        }

        bucketNumber = (int) (((long)bucketNumber + incr)% (uint)buckets.Length);
    } while (++ntry < buckets.Length);

    // This code is here if and only if there were no buckets without a collision bit set in the entire table
    if (emptySlotNumber != -1)
    {
        // We pretty much have to insert in this order. Don't set hash
        // code until the value & key are set appropriately.
#if !FEATURE_CORECLR
        Thread.BeginCriticalRegion();
#endif
        isWriterInProgress = true;
        buckets[emptySlotNumber].val = nvalue;
        buckets[emptySlotNumber].key = key;
        buckets[emptySlotNumber].hash_coll |= (int) hashcode;
        count++;
        version++;
        isWriterInProgress = false;
#if !FEATURE_CORECLR
        Thread.EndCriticalRegion();
#endif

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
        // Fixed during review: the sibling checks above use the probe count
        // (ntry), not buckets.Length; comparing the table size against the
        // collision threshold would trigger a rehash on nearly every insert
        // into a large table.
        if(ntry > HashHelpers.HashCollisionThreshold && HashHelpers.IsWellKnownEqualityComparer(_keycomparer))
        {
            // PERF: We don't want to rehash if _keycomparer is already a RandomizedObjectEqualityComparer since in some
            // cases there may not be any strings in the hashtable and we wouldn't get any mixing.
            if(_keycomparer == null || !(_keycomparer is System.Collections.Generic.RandomizedObjectEqualityComparer))
            {
                _keycomparer = HashHelpers.GetRandomizedEqualityComparer(_keycomparer);
                rehash(buckets.Length, true);
            }
        }
#endif // !FEATURE_CORECLR
#endif // FEATURE_RANDOMIZED_STRING_HASHING
        return;
    }

    // If you see this assert, make sure load factor & count are reasonable.
    // Then verify that our double hash function (h2, described at top of file)
    // meets the requirements described above. You should never see this assert.
    Contract.Assert(false, "hash table insert failed! Load factor too high, or our double hashing function is incorrect.");
    throw new InvalidOperationException(Environment.GetResourceString("InvalidOperation_HashInsertFailed"));
}
// Places an entry directly into newBuckets during a rehash/expand. Unlike
// Insert, no duplicate check is needed (keys come from the old table) and
// the hashcode has already been computed, so only the probe loop remains.
private void putEntry (bucket[] newBuckets, Object key, Object nvalue, int hashcode)
{
    Contract.Assert(hashcode >= 0, "hashcode >= 0"); // make sure collision bit (sign bit) wasn't set.

    uint seed = (uint) hashcode;
    // Double-hash increment; newBuckets.Length is prime, so incr is
    // relatively prime to it and the probe sequence visits every bucket.
    uint incr = (uint)(1 + ((seed * HashPrime) % ((uint)newBuckets.Length - 1)));
    int bucketNumber = (int) (seed % (uint)newBuckets.Length);
    do {
        // Empty bucket, or a deleted-entry tombstone (key == buckets): claim it.
        if ((newBuckets[bucketNumber].key == null) || (newBuckets[bucketNumber].key == buckets)) {
            newBuckets[bucketNumber].val = nvalue;
            newBuckets[bucketNumber].key = key;
            newBuckets[bucketNumber].hash_coll |= hashcode;
            return;
        }

        // Occupied: mark the collision bit (sign bit) if not already set and
        // keep probing.
        if( newBuckets[bucketNumber].hash_coll >= 0 ) {
            newBuckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
            occupancy++;
        }
        bucketNumber = (int) (((long)bucketNumber + incr)% (uint)newBuckets.Length);
    } while (true);
}
// Removes an entry from this hashtable. If an entry with the specified
// key exists in the hashtable, it is removed. An ArgumentException is
// thrown if the key is null.
// NOTE(review): several lines of this method appear to be missing from this
// copy of the file (the null guard's "if", the probe-loop "do {", and an
// "else" between the tombstone/clear branches) — compare against the
// reference source before building.
[ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
public virtual void Remove(Object key) {
throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
Contract.EndContractBlock();
Contract.Assert(!isWriterInProgress, "Race condition detected in usages of Hashtable - multiple threads appear to be writing to a Hashtable instance simultaneously! Don't do that - use Hashtable.Synchronized.");
// Assuming only one concurrent writer, write directly into buckets.
uint hashcode = InitHash(key, buckets.Length, out seed, out incr);
int bn = (int) (seed % (uint)buckets.Length); // bucketNumber
// Match on the stored hash (low 31 bits; sign bit is the collision flag)
// before doing the potentially expensive key comparison.
if (((b.hash_coll & 0x7FFFFFFF) == hashcode) &&
KeyEquals (b.key, key)) {
#if !FEATURE_CORECLR
Thread.BeginCriticalRegion();
isWriterInProgress = true;
// Clear hash_coll field, then key, then value
buckets[bn].hash_coll &= unchecked((int)0x80000000);
// If the collision bit survives, later probes must continue past this slot:
// leave a tombstone (key == buckets array). Otherwise the slot is truly free.
if (buckets[bn].hash_coll != 0) {
buckets[bn].key = buckets;
buckets[bn].key = null;
buckets[bn].val = null; // Free object references sooner & simplify ContainsValue.
isWriterInProgress = false;
#if !FEATURE_CORECLR
Thread.EndCriticalRegion();
// Advance along the double-hash probe sequence.
bn = (int) (((long)bn + incr)% (uint)buckets.Length);
// Keep probing only while the chain is marked collided and we haven't
// wrapped the whole table.
} while (b.hash_coll < 0 && ++ntry < buckets.Length);
// Removing a missing key is deliberately a no-op (commented-out throw).
//throw new ArgumentException(Environment.GetResourceString("Arg_RemoveArgNotFound"));
// Returns the object to synchronize on for this hash table.
public virtual Object SyncRoot {
// Lazily allocate the sync object; CompareExchange guarantees all racing
// threads observe the same single instance.
if( _syncRoot == null) {
System.Threading.Interlocked.CompareExchange<Object>(ref _syncRoot, new Object(), null);
// Returns the number of associations in this hashtable.
public virtual int Count {
get { return count; }
// Returns a thread-safe wrapper for a Hashtable.
//
// Throws ArgumentNullException if table is null. (The null guard was
// missing here, making the method throw unconditionally; restored to match
// the guard pattern used by ContainsKey.)
[HostProtection(Synchronization=true)]
public static Hashtable Synchronized(Hashtable table) {
    if (table == null)
        throw new ArgumentNullException("table");
    Contract.EndContractBlock();
    return new SyncHashtable(table);
}
// The ISerializable Implementation
// Serializes load factor, version, comparer (in a back-compatible shape),
// bucket-array length, and parallel key/value arrays. Detects concurrent
// mutation by re-checking version at the end.
[System.Security.SecurityCritical]
public virtual void GetObjectData(SerializationInfo info, StreamingContext context) {
throw new ArgumentNullException("info");
Contract.EndContractBlock();
// This is imperfect - it only works well if all other writes are
// also using our synchronized wrapper. But it's still a good idea.
// This method hasn't been fully tweaked to be safe for a concurrent writer.
int oldVersion = version;
info.AddValue(LoadFactorName, loadFactor);
info.AddValue(VersionName, version);
// We need to maintain serialization compatibility with Everett and RTM.
// If the comparer is null or a compatible comparer, serialize Hashtable
// in a format that can be deserialized on Everett and RTM.
// Also, if the Hashtable is using randomized hashing, serialize the old
// view of the _keycomparer so previous frameworks don't see the new types
#pragma warning disable 618
#if FEATURE_RANDOMIZED_STRING_HASHING
IEqualityComparer keyComparerForSerilization = (IEqualityComparer) HashHelpers.GetEqualityComparerForSerialization(_keycomparer);
IEqualityComparer keyComparerForSerilization = _keycomparer;
// Legacy (V1) format: store IComparer + IHashCodeProvider slots, null when absent.
if( keyComparerForSerilization == null) {
info.AddValue(ComparerName, null,typeof(IComparer));
info.AddValue(HashCodeProviderName, null, typeof(IHashCodeProvider));
else if(keyComparerForSerilization is CompatibleComparer) {
CompatibleComparer c = keyComparerForSerilization as CompatibleComparer;
info.AddValue(ComparerName, c.Comparer, typeof(IComparer));
info.AddValue(HashCodeProviderName, c.HashCodeProvider, typeof(IHashCodeProvider));
// Modern format: store the IEqualityComparer directly.
info.AddValue(KeyComparerName, keyComparerForSerilization, typeof(IEqualityComparer));
#pragma warning restore 618
info.AddValue(HashSizeName, buckets.Length); //This is the length of the bucket array.
Object [] serKeys = new Object[count];
Object [] serValues = new Object[count];
CopyKeys(serKeys, 0);
CopyValues(serValues,0);
info.AddValue(KeysName, serKeys, typeof(Object[]));
info.AddValue(ValuesName, serValues, typeof(Object[]));
// Explicitly check to see if anyone changed the Hashtable while we
// were serializing it. That's a race in their code.
if (version != oldVersion)
throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumFailedVersion));
// DeserializationEvent Listener
// Rebuilds the table from the SerializationInfo stashed by the
// serialization constructor in HashHelpers.SerializationInfoTable, then
// re-inserts every key/value pair so bucket placement matches the current
// hash behavior.
public virtual void OnDeserialization(Object sender) {
if (buckets!=null) {
// Somebody had a dependency on this hashtable and fixed us up before the ObjectManager got to it.
SerializationInfo siInfo;
HashHelpers.SerializationInfoTable.TryGetValue(this, out siInfo);
throw new SerializationException(Environment.GetResourceString("Serialization_InvalidOnDeser"));
#pragma warning disable 618
IHashCodeProvider hcp = null;
#pragma warning restore 618
Object [] serKeys = null;
Object [] serValues = null;
// Walk every stored entry; unknown names are ignored for forward compatibility.
SerializationInfoEnumerator enumerator = siInfo.GetEnumerator();
while( enumerator.MoveNext())
switch( enumerator.Name)
case LoadFactorName:
loadFactor = siInfo.GetSingle(LoadFactorName);
hashsize = siInfo.GetInt32(HashSizeName);
case KeyComparerName:
_keycomparer = (IEqualityComparer)siInfo.GetValue(KeyComparerName, typeof(IEqualityComparer));
c = (IComparer)siInfo.GetValue(ComparerName, typeof(IComparer));
case HashCodeProviderName:
#pragma warning disable 618
hcp = (IHashCodeProvider)siInfo.GetValue(HashCodeProviderName, typeof(IHashCodeProvider));
#pragma warning restore 618
serKeys = (Object[])siInfo.GetValue(KeysName, typeof(Object[]));
serValues = (Object[])siInfo.GetValue(ValuesName, typeof(Object[]));
loadsize = (int)(loadFactor*hashsize);
// V1 object doesn't have a _keycomparer field: wrap the legacy
// IComparer/IHashCodeProvider pair in a CompatibleComparer.
if ( (_keycomparer == null) && ( (c != null) || (hcp != null) ) ){
_keycomparer = new CompatibleComparer(c,hcp);
buckets = new bucket[hashsize];
if (serKeys==null) {
throw new SerializationException(Environment.GetResourceString("Serialization_MissingKeys"));
if (serValues==null) {
throw new SerializationException(Environment.GetResourceString("Serialization_MissingValues"));
if (serKeys.Length!=serValues.Length) {
throw new SerializationException(Environment.GetResourceString("Serialization_KeyValueDifferentSizes"));
for (int i=0; i<serKeys.Length; i++) {
if (serKeys[i]==null) {
throw new SerializationException(Environment.GetResourceString("Serialization_NullKey"));
// add=true: a duplicate key in the payload is a corruption error.
Insert(serKeys[i], serValues[i], true);
version = siInfo.GetInt32(VersionName);
// Done with the stashed info; allow it to be collected.
HashHelpers.SerializationInfoTable.Remove(this);
// Implements a Collection for the keys of a hashtable. An instance of this
// class is created by the GetKeys method of a hashtable.
// The collection is a live view: it holds only a reference to the parent
// table and delegates Count/CopyTo/enumeration to it.
private class KeyCollection : ICollection
// Parent table this collection is a view over.
private Hashtable _hashtable;
internal KeyCollection(Hashtable hashtable) {
_hashtable = hashtable;
// Copies the keys into array starting at arrayIndex; validates the
// destination before delegating to Hashtable.CopyKeys.
public virtual void CopyTo(Array array, int arrayIndex) {
throw new ArgumentNullException("array");
if (array.Rank != 1)
throw new ArgumentException(Environment.GetResourceString("Arg_RankMultiDimNotSupported"));
throw new ArgumentOutOfRangeException("arrayIndex", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
Contract.EndContractBlock();
if (array.Length - arrayIndex < _hashtable.count)
throw new ArgumentException(Environment.GetResourceString("Arg_ArrayPlusOffTooSmall"));
_hashtable.CopyKeys(array, arrayIndex);
// Enumerates only the keys (HashtableEnumerator.Keys mode).
public virtual IEnumerator GetEnumerator() {
return new HashtableEnumerator(_hashtable, HashtableEnumerator.Keys);
// Synchronization properties mirror the parent table's.
public virtual bool IsSynchronized {
get { return _hashtable.IsSynchronized; }
public virtual Object SyncRoot {
get { return _hashtable.SyncRoot; }
public virtual int Count {
get { return _hashtable.count; }
// Implements a Collection for the values of a hashtable. An instance of
// this class is created by the GetValues method of a hashtable.
// Mirrors KeyCollection: a live view that delegates everything to the
// parent table.
private class ValueCollection : ICollection
// Parent table this collection is a view over.
private Hashtable _hashtable;
internal ValueCollection(Hashtable hashtable) {
_hashtable = hashtable;
// Copies the values into array starting at arrayIndex; validates the
// destination before delegating to Hashtable.CopyValues.
public virtual void CopyTo(Array array, int arrayIndex) {
throw new ArgumentNullException("array");
if (array.Rank != 1)
throw new ArgumentException(Environment.GetResourceString("Arg_RankMultiDimNotSupported"));
throw new ArgumentOutOfRangeException("arrayIndex", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
Contract.EndContractBlock();
if (array.Length - arrayIndex < _hashtable.count)
throw new ArgumentException(Environment.GetResourceString("Arg_ArrayPlusOffTooSmall"));
_hashtable.CopyValues(array, arrayIndex);
// Enumerates only the values (HashtableEnumerator.Values mode).
public virtual IEnumerator GetEnumerator() {
return new HashtableEnumerator(_hashtable, HashtableEnumerator.Values);
// Synchronization properties mirror the parent table's.
public virtual bool IsSynchronized {
get { return _hashtable.IsSynchronized; }
public virtual Object SyncRoot {
get { return _hashtable.SyncRoot; }
public virtual int Count {
get { return _hashtable.count; }
// Synchronized wrapper for hashtable
// Every mutating operation (and the bulk reads) locks _table.SyncRoot;
// simple delegating reads (Contains, ContainsKey, enumeration) do not.
// NOTE(review): some guard lines (e.g. null checks before throws) appear to
// be missing from this copy of the file.
private class SyncHashtable : Hashtable, IEnumerable
// The wrapped, unsynchronized table all operations delegate to.
protected Hashtable _table;
internal SyncHashtable(Hashtable table) : base(false) {
// Deserialization ctor: recover the wrapped table from the "ParentTable" slot.
internal SyncHashtable(SerializationInfo info, StreamingContext context) : base (info, context) {
_table = (Hashtable)info.GetValue("ParentTable", typeof(Hashtable));
throw new SerializationException(Environment.GetResourceString("Serialization_InsufficientState"));
/*================================GetObjectData=================================
**Action: Return a serialization info containing a reference to _table. We need
** to implement this because our parent HT does and we don't want to actually
** serialize all of it's values (just a reference to the table, which will then
** be serialized separately.)
**Arguments: info -- the SerializationInfo into which to store the data.
** context -- the StreamingContext for the current serialization (ignored)
**Exceptions: ArgumentNullException if info is null.
==============================================================================*/
[System.Security.SecurityCritical] // auto-generated
public override void GetObjectData(SerializationInfo info, StreamingContext context) {
throw new ArgumentNullException("info");
Contract.EndContractBlock();
// Our serialization code hasn't been fully tweaked to be safe
// for a concurrent writer.
lock (_table.SyncRoot) {
info.AddValue("ParentTable", _table, typeof(Hashtable));
public override int Count {
get { return _table.Count; }
public override bool IsReadOnly {
get { return _table.IsReadOnly; }
public override bool IsFixedSize {
get { return _table.IsFixedSize; }
// Always true: that is the point of this wrapper.
public override bool IsSynchronized {
get { return true; }
public override Object this[Object key] {
lock(_table.SyncRoot) {
_table[key] = value;
public override Object SyncRoot {
get { return _table.SyncRoot; }
public override void Add(Object key, Object value) {
lock(_table.SyncRoot) {
_table.Add(key, value);
public override void Clear() {
lock(_table.SyncRoot) {
// Contains/ContainsKey are lock-free delegations (single read).
public override bool Contains(Object key) {
return _table.Contains(key);
public override bool ContainsKey(Object key) {
throw new ArgumentNullException("key", Environment.GetResourceString("ArgumentNull_Key"));
Contract.EndContractBlock();
return _table.ContainsKey(key);
public override bool ContainsValue(Object key) {
lock(_table.SyncRoot) {
return _table.ContainsValue(key);
public override void CopyTo(Array array, int arrayIndex) {
lock (_table.SyncRoot) {
_table.CopyTo(array, arrayIndex);
// Clone returns another synchronized wrapper around a cloned table.
public override Object Clone() {
lock (_table.SyncRoot) {
return Hashtable.Synchronized((Hashtable)_table.Clone());
IEnumerator IEnumerable.GetEnumerator() {
return _table.GetEnumerator();
public override IDictionaryEnumerator GetEnumerator() {
return _table.GetEnumerator();
public override ICollection Keys {
lock(_table.SyncRoot) {
public override ICollection Values {
lock(_table.SyncRoot) {
return _table.Values;
public override void Remove(Object key) {
lock(_table.SyncRoot) {
/*==============================OnDeserialization===============================
**Action: Does nothing. We have to implement this because our parent HT implements it,
** but it doesn't do anything meaningful. The real work will be done when we
** call OnDeserialization on our parent table.
==============================================================================*/
public override void OnDeserialization(Object sender) {
internal override KeyValuePairs[] ToKeyValuePairsArray() {
return _table.ToKeyValuePairsArray();
// Implements an enumerator for a hashtable. The enumerator uses the
// internal version number of the hashtable to ensure that no modifications
// are made to the hashtable while an enumeration is in progress.
// Walks the bucket array backwards; getObjectRetType selects whether
// Current returns the key, the value, or a DictionaryEntry.
private class HashtableEnumerator : IDictionaryEnumerator, ICloneable
private Hashtable hashtable;
// Snapshot of hashtable.version taken at construction; any mismatch later
// means the table was modified and enumeration must fail.
private int version;
// True only while positioned on a valid entry.
private bool current;
private int getObjectRetType; // What should GetObject return?
private Object currentKey;
private Object currentValue;
internal const int Keys = 1;
internal const int Values = 2;
internal const int DictEntry = 3;
internal HashtableEnumerator(Hashtable hashtable, int getObjRetType) {
this.hashtable = hashtable;
// Start one past the last bucket; MoveNext pre-decrements.
bucket = hashtable.buckets.Length;
version = hashtable.version;
getObjectRetType = getObjRetType;
public Object Clone() {
return MemberwiseClone();
public virtual Object Key {
if (current == false) throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumNotStarted));
// Advances to the next occupied bucket, skipping empty slots and
// tombstones (key == the buckets array itself).
public virtual bool MoveNext() {
if (version != hashtable.version) throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumFailedVersion));
while (bucket > 0) {
Object keyv = hashtable.buckets[bucket].key;
if ((keyv!= null) && (keyv != hashtable.buckets)) {
currentValue = hashtable.buckets[bucket].val;
public virtual DictionaryEntry Entry {
if (current == false) throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumOpCantHappen));
return new DictionaryEntry(currentKey, currentValue);
public virtual Object Current {
if (current == false) throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumOpCantHappen));
if (getObjectRetType==Keys)
else if (getObjectRetType==Values)
return currentValue;
return new DictionaryEntry(currentKey, currentValue);
public virtual Object Value {
if (current == false) throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumOpCantHappen));
return currentValue;
// Rewinds to the pre-start position; fails if the table changed.
public virtual void Reset() {
if (version != hashtable.version) throw new InvalidOperationException(Environment.GetResourceString(ResId.InvalidOperation_EnumFailedVersion));
bucket = hashtable.buckets.Length;
currentValue = null;
// internal debug view class for hashtable
// Lets the debugger display the table as a flat key/value array
// (see DebuggerTypeProxy usage on Hashtable).
internal class HashtableDebugView {
private Hashtable hashtable;
public HashtableDebugView( Hashtable hashtable) {
if( hashtable == null) {
throw new ArgumentNullException( "hashtable");
Contract.EndContractBlock();
this.hashtable = hashtable;
// RootHidden makes the debugger show the pairs directly, not the array node.
[DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
public KeyValuePairs[] Items {
return hashtable.ToKeyValuePairsArray();
// Shared helpers for Hashtable/Dictionary: prime-size selection,
// randomized-hashing support, and serialization-info stashing.
[FriendAccessAllowed]
internal static class HashHelpers
#if FEATURE_RANDOMIZED_STRING_HASHING
// Probe-count threshold after which a table switches to a randomized
// string comparer to defend against hash-collision DoS.
public const int HashCollisionThreshold = 100;
public static bool s_UseRandomizedStringHashing = String.UseRandomizedHashing();
// Table of prime numbers to use as hash table sizes.
// A typical resize algorithm would pick the smallest prime number in this array
// that is larger than twice the previous capacity.
// Suppose our Hashtable currently has capacity x and enough elements are added
// such that a resize needs to occur. Resizing first computes 2x then finds the
// first prime in the table greater than 2x, i.e. if primes are ordered
// p_1, p_2, ..., p_i, ..., it finds p_n such that p_n-1 < 2x < p_n.
// Doubling is important for preserving the asymptotic complexity of the
// hashtable operations such as add. Having a prime guarantees that double
// hashing does not lead to infinite loops. IE, your hash function will be
// h1(key) + i*h2(key), 0 <= i < size. h2 and the size must be relatively prime.
public static readonly int[] primes = {
3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919,
1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591,
17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437,
187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263,
1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369};
// Used by Hashtable and Dictionary's SerializationInfo .ctor's to store the SerializationInfo
// object until OnDeserialization is called.
private static ConditionalWeakTable<object, SerializationInfo> s_SerializationInfoTable;
internal static ConditionalWeakTable<object, SerializationInfo> SerializationInfoTable
// Lazily initialized; CompareExchange ensures a single shared instance
// even under a racing first access.
if(s_SerializationInfoTable == null)
ConditionalWeakTable<object, SerializationInfo> newTable = new ConditionalWeakTable<object, SerializationInfo>();
Interlocked.CompareExchange(ref s_SerializationInfoTable, newTable, null);
return s_SerializationInfoTable;
// Returns true if candidate is prime by trial division with odd divisors up
// to sqrt(candidate). Even numbers are prime only if they equal 2.
// Note: candidates less than 2 that are odd (e.g. 1) fall through the empty
// divisor loop and report true; callers only pass values >= 3 in practice.
[ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
public static bool IsPrime(int candidate)
{
    if ((candidate & 1) != 0)
    {
        int limit = (int)Math.Sqrt (candidate);
        for (int divisor = 3; divisor <= limit; divisor += 2)
        {
            if ((candidate % divisor) == 0)
                return false;
        }
        return true;
    }
    return (candidate == 2);
}
// Returns the smallest prime >= min: first from the precomputed table, and
// for larger requests by trial division over odd candidates, skipping
// primes p where (p - 1) is divisible by HashPrime (such sizes degrade the
// double-hash increment distribution).
// Throws ArgumentException for negative min (capacity overflow).
[ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
public static int GetPrime(int min)
{
    if (min < 0)
        throw new ArgumentException(Environment.GetResourceString("Arg_HTCapacityOverflow"));
    Contract.EndContractBlock();

    for (int i = 0; i < primes.Length; i++)
    {
        int prime = primes[i];
        if (prime >= min) return prime;
    }

    //outside of our predefined table.
    //compute the hard way.
    for (int i = (min | 1); i < Int32.MaxValue;i+=2)
    {
        if (IsPrime(i) && ((i - 1) % Hashtable.HashPrime != 0))
            return i;
    }
    return min;
}
// Returns the smallest table size we ever use: the first entry of the
// primes table (3). The body was truncated in this copy; restored.
public static int GetMinPrime()
{
    return primes[0];
}
// Returns size of hashtable to grow to.
// Doubles oldSize and rounds up to a prime, but caps growth at
// MaxPrimeArrayLength (the largest prime below the max array length) so the
// table can approach ~2G elements without overflowing.
public static int ExpandPrime(int oldSize)
{
    int newSize = 2 * oldSize;

    // Allow the hashtables to grow to maximum possible size (~2G elements) before encountering capacity overflow.
    // Note that this check works even when _items.Length overflowed thanks to the (uint) cast
    if ((uint)newSize > MaxPrimeArrayLength && MaxPrimeArrayLength > oldSize)
    {
        Contract.Assert( MaxPrimeArrayLength == GetPrime(MaxPrimeArrayLength), "Invalid MaxPrimeArrayLength");
        return MaxPrimeArrayLength;
    }

    return GetPrime(newSize);
}
// This is the maximum prime smaller than Array.MaxArrayLength
public const int MaxPrimeArrayLength = 0x7FEFFFFD;
#if FEATURE_RANDOMIZED_STRING_HASHING
// True for comparers we know how to swap for a randomized equivalent:
// null (default hashing), the default string comparer, or any comparer
// implementing IWellKnownStringEqualityComparer.
public static bool IsWellKnownEqualityComparer(object comparer)
{
return (comparer == null || comparer == System.Collections.Generic.EqualityComparer<string>.Default || comparer is IWellKnownStringEqualityComparer);
// Maps a well-known comparer (see IsWellKnownEqualityComparer) to its
// randomized counterpart, used once a collision threshold is crossed.
public static IEqualityComparer GetRandomizedEqualityComparer(object comparer)
{
Contract.Assert(comparer == null || comparer == System.Collections.Generic.EqualityComparer<string>.Default || comparer is IWellKnownStringEqualityComparer);
if(comparer == null) {
return new System.Collections.Generic.RandomizedObjectEqualityComparer();
if(comparer == System.Collections.Generic.EqualityComparer<string>.Default) {
return new System.Collections.Generic.RandomizedStringEqualityComparer();
// Well-known comparers supply their own randomized equivalent.
IWellKnownStringEqualityComparer cmp = comparer as IWellKnownStringEqualityComparer;
return cmp.GetRandomizedEqualityComparer();
Contract.Assert(false, "Missing case in GetRandomizedEqualityComparer!");
// Maps a (possibly randomized) comparer back to the legacy comparer shape
// used in serialized payloads, so older frameworks can deserialize them.
public static object GetEqualityComparerForSerialization(object comparer)
{
if(comparer == null)
IWellKnownStringEqualityComparer cmp = comparer as IWellKnownStringEqualityComparer;
return cmp.GetEqualityComparerForSerialization();
// State for GetEntropy: a lazily-created CSPRNG refills a 1K buffer that is
// consumed 8 bytes at a time under lockObj. currentIndex starts at
// bufferSize so the first call triggers a fill.
private const int bufferSize = 1024;
private static RandomNumberGenerator rng;
private static byte[] data;
private static int currentIndex = bufferSize;
private static readonly object lockObj = new Object();
// Returns 8 bytes of cryptographically strong entropy, refilling the shared
// buffer from the CSPRNG when exhausted.
// NOTE(review): the lock acquisition, buffer refill (rng.GetBytes/
// currentIndex reset), index advance, and return appear to be missing from
// this copy of the file — compare against the reference source.
internal static long GetEntropy()
if(currentIndex == bufferSize)
// First use: create the RNG and its backing buffer.
rng = RandomNumberGenerator.Create();
data = new byte[bufferSize];
Contract.Assert(bufferSize % 8 == 0, "We increment our current index by 8, so our buffer size must be a multiple of 8");
ret = BitConverter.ToInt64(data, currentIndex);
#endif // FEATURE_RANDOMIZED_STRING_HASHING