Batcher.cs

// batching functionality encapsulated into one class.
// -> less complexity
// -> easy to test
//
// IMPORTANT: we use THRESHOLD batching, not MAX SIZE batching.
// see threshold comments below.
//
// includes timestamp for tick batching.
// -> allows NetworkTransform etc. to use the timestamp without including it
//    in every single message
using System;
using System.Collections.Generic;

namespace Mirror
{
    public class Batcher
    {
        // batching threshold instead of max size.
        // -> small messages are fitted into threshold-sized batches
        // -> messages larger than threshold become single batches
        //
        // in other words, we fit up to 'threshold' but still allow larger
        // messages, for two reasons:
        // 1.) data races: skipping batching for larger messages would send a
        //     large spawn message immediately, while others are batched and
        //     only flushed at the end of the frame
        // 2.) timestamp batching: if each batch is expected to contain a
        //     timestamp, then large messages have to be a batch too.
        //     otherwise they would not contain a timestamp.
        readonly int threshold;

        // TimeStamp header size for those who need it
        public const int HeaderSize = sizeof(double);

        // full batches ready to be sent.
        // DO NOT queue NetworkMessage, it would box.
        // DO NOT queue each serialization separately.
        //        it would allocate too many writers.
        //        https://github.com/vis2k/Mirror/pull/3127
        // => best to build batches on the fly.
        readonly Queue<NetworkWriterPooled> batches = new Queue<NetworkWriterPooled>();

        // current batch in progress
        NetworkWriterPooled batch;

        public Batcher(int threshold)
        {
            this.threshold = threshold;
        }
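
        // example (illustration only, not from the original source): a
        // transport might pass its max packet size as the threshold, e.g.
        // 'new Batcher(1500)' for a 1500 byte MTU. batches of small messages
        // then never exceed one packet, while a single larger message still
        // becomes its own (oversized) batch.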
        // add a message for batching.
        // we allow messages of any size.
        // the caller needs to make sure they are within max packet size.
        public void AddMessage(ArraySegment<byte> message, double timeStamp)
        {
            // when appending to a batch in progress, check the final size.
            // if it would expand beyond threshold, finalize the batch first.
            // => less than or exactly threshold is fine.
            //    GetBatch() will finalize it.
            // => see unit tests.
            if (batch != null &&
                batch.Position + message.Count > threshold)
            {
                batches.Enqueue(batch);
                batch = null;
            }

            // initialize a new batch if necessary
            if (batch == null)
            {
                // borrow from the pool. we return it in GetBatch.
                batch = NetworkWriterPool.Get();

                // write the timestamp first.
                // -> double precision for accuracy over long periods of time
                // -> batches are per-frame, so it doesn't matter which
                //    message's timestamp we use.
                batch.WriteDouble(timeStamp);
            }

            // add the serialization to the current batch, even if > threshold.
            // -> we do allow > threshold sized messages as a single batch
            // -> WriteBytes instead of WriteSegment because the latter
            //    would add a size header. we want to write directly.
            batch.WriteBytes(message.Array, message.Offset, message.Count);
        }
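
        // worked example (illustration only), with threshold = 16 and the
        // 8 byte timestamp header:
        //   AddMessage(4 bytes)   -> new batch, timestamp written, Position = 12
        //   AddMessage(4 bytes)   -> 12 + 4 = 16, not above threshold, Position = 16
        //   AddMessage(4 bytes)   -> 16 + 4 > 16: batch queued, new batch
        //                            started, Position = 12
        //   AddMessage(100 bytes) -> 12 + 100 > 16: batch queued; the large
        //                            message becomes its own 108 byte batch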

        // helper function to copy a batch to the writer and return it to the pool
        static void CopyAndReturn(NetworkWriterPooled batch, NetworkWriter writer)
        {
            // make sure the writer is fresh to avoid uncertain situations
            if (writer.Position != 0)
                throw new ArgumentException("GetBatch needs a fresh writer!");

            // copy to the target writer
            ArraySegment<byte> segment = batch.ToArraySegment();
            writer.WriteBytes(segment.Array, segment.Offset, segment.Count);

            // return the batch to the pool for reuse
            NetworkWriterPool.Return(batch);
        }

        // get the next batch which is available for sending (if any).
        // TODO safely get & return a batch instead of copying to writer?
        // TODO could return pooled writer & use GetBatch in a 'using' statement!
        public bool GetBatch(NetworkWriter writer)
        {
            // get the first batch from the queue (if any)
            if (batches.TryDequeue(out NetworkWriterPooled first))
            {
                CopyAndReturn(first, writer);
                return true;
            }

            // if the queue was empty, we can send the batch in progress.
            if (batch != null)
            {
                CopyAndReturn(batch, writer);
                batch = null;
                return true;
            }

            // nothing was written
            return false;
        }
    }
}
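
// Usage sketch (not part of the original file): how a per-connection send
// loop might drain the batcher at the end of each frame. 'Send' stands in
// for a transport callback, and NetworkTime.localTime is assumed as the
// timestamp source; both are illustrative, not confirmed by this file.
//
//     Batcher batcher = new Batcher(threshold: 1024);
//     batcher.AddMessage(segment, NetworkTime.localTime); // during the frame
//
//     // end of frame: copy each finished batch out and send it.
//     using (NetworkWriterPooled writer = NetworkWriterPool.Get())
//     {
//         while (batcher.GetBatch(writer))
//         {
//             Send(writer.ToArraySegment());
//             writer.Position = 0; // GetBatch requires a fresh writer
//         }
//     }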