// Batcher.cs
  1. // batching functionality encapsulated into one class.
  2. // -> less complexity
  3. // -> easy to test
  4. //
  5. // IMPORTANT: we use THRESHOLD batching, not MAXED SIZE batching.
  6. // see threshold comments below.
  7. //
  8. // includes timestamp for tick batching.
  9. // -> allows NetworkTransform etc. to use timestamp without including it in
  10. // every single message
  11. using System;
  12. using System.Collections.Generic;
  13. namespace Mirror
  14. {
  15. public class Batcher
  16. {
  17. // batching threshold instead of max size.
  18. // -> small messages are fit into threshold sized batches
  19. // -> messages larger than threshold are single batches
  20. //
  21. // in other words, we fit up to 'threshold' but still allow larger ones
  22. // for two reasons:
  23. // 1.) data races: skipping batching for larger messages would send a
  24. // large spawn message immediately, while others are batched and
  25. // only flushed at the end of the frame
  26. // 2) timestamp batching: if each batch is expected to contain a
  27. // timestamp, then large messages have to be a batch too. otherwise
  28. // they would not contain a timestamp
  29. readonly int threshold;
  30. // TimeStamp header size for those who need it
  31. public const int HeaderSize = sizeof(double);
  32. // batched messages
  33. // IMPORTANT: we queue the serialized messages!
  34. // queueing NetworkMessage would box and allocate!
  35. Queue<PooledNetworkWriter> messages = new Queue<PooledNetworkWriter>();
  36. public Batcher(int threshold)
  37. {
  38. this.threshold = threshold;
  39. }
  40. // add a message for batching
  41. // we allow any sized messages.
  42. // caller needs to make sure they are within max packet size.
  43. public void AddMessage(ArraySegment<byte> message)
  44. {
  45. // put into a (pooled) writer
  46. // -> WriteBytes instead of WriteSegment because the latter
  47. // would add a size header. we want to write directly.
  48. // -> will be returned to pool when making the batch!
  49. // IMPORTANT: NOT adding a size header / msg saves LOTS of bandwidth
  50. PooledNetworkWriter writer = NetworkWriterPool.GetWriter();
  51. writer.WriteBytes(message.Array, message.Offset, message.Count);
  52. messages.Enqueue(writer);
  53. }
  54. // batch as many messages as possible into writer
  55. // returns true if any batch was made.
  56. public bool MakeNextBatch(NetworkWriter writer, double timeStamp)
  57. {
  58. // if we have no messages then there's nothing to do
  59. if (messages.Count == 0)
  60. return false;
  61. // make sure the writer is fresh to avoid uncertain situations
  62. if (writer.Position != 0)
  63. throw new ArgumentException($"MakeNextBatch needs a fresh writer!");
  64. // write timestamp first
  65. // -> double precision for accuracy over long periods of time
  66. writer.WriteDouble(timeStamp);
  67. // do start no matter what
  68. do
  69. {
  70. // add next message no matter what. even if > threshold.
  71. // (we do allow > threshold sized messages as single batch)
  72. PooledNetworkWriter message = messages.Dequeue();
  73. ArraySegment<byte> segment = message.ToArraySegment();
  74. writer.WriteBytes(segment.Array, segment.Offset, segment.Count);
  75. // return the writer to pool
  76. NetworkWriterPool.Recycle(message);
  77. }
  78. // keep going as long as we have more messages,
  79. // AND the next one would fit into threshold.
  80. while (messages.Count > 0 &&
  81. writer.Position + messages.Peek().Position <= threshold);
  82. // we had messages, so a batch was made
  83. return true;
  84. }
  85. }
  86. }