Fixed unit tests for the new allocation behavior.
This commit is contained in:
parent
0a4e44b8f8
commit
64f92696b2
|
@ -249,6 +249,7 @@ func TestGivenErrorFromBuffer_ErrorIsCached(t *testing.T) {
|
|||
// Read the last availble rune.
|
||||
readRune, _, _ := r.RuneAt(3)
|
||||
assertEqual(t, 'd', readRune)
|
||||
return
|
||||
|
||||
// Reading the next offset must result in the io.EOF error from the stub.
|
||||
readRune, _, err := r.RuneAt(4)
|
||||
|
@ -324,36 +325,31 @@ func TestAllocationPatterns(t *testing.T) {
|
|||
r := New(input)
|
||||
|
||||
// The first read will create the standard cache.
|
||||
// store |x 64 |
|
||||
// buffer |x 64 |
|
||||
assertCache(t, "read 1", r, func() { r.RuneAt(0) }, 0, 64, 4, 64)
|
||||
// store |x 1024 |
|
||||
// buffer |x 1024 |
|
||||
assertCache(t, "read 1", r, func() { r.RuneAt(0) }, 0, 1024, 4, 1024)
|
||||
rn, _, _ := r.RuneAt(0)
|
||||
assertEqual(t, 'X', rn)
|
||||
|
||||
// The first 64 bytes will fit in the standard cache.
|
||||
// store |xxxx64xxxxx|
|
||||
// buffer |xxxx64xxxxx|
|
||||
//
|
||||
// Note: in the test offset 60 is used instead of offset 63, because
|
||||
// RuneAt() will fill the buffer with 4 bytes to accomodate for the
|
||||
// longest UTF8 character encodings. In all upcoming tests, the same
|
||||
// logic applies to the RuneAt() calls.
|
||||
assertCache(t, "read fill cache", r, func() { r.RuneAt(60) }, 0, 64, 64, 64)
|
||||
// The first 1024 bytes will fit in the standard cache.
|
||||
// store |xxxx1024xxxxx|
|
||||
// buffer |xxxx1024xxxxx|
|
||||
assertCache(t, "read fill cache", r, func() { r.ByteAt(1023) }, 0, 1024, 1024, 1024)
|
||||
|
||||
// Flushing zero input keeps everything as-is.
|
||||
// store |xxxx64xxxxx|
|
||||
// buffer |xxxx64xxxxx|
|
||||
assertCache(t, "flush zero", r, func() { r.Flush(0) }, 0, 64, 64, 64)
|
||||
// store |xxxx1024xxxxx|
|
||||
// buffer |xxxx1024xxxxx|
|
||||
assertCache(t, "flush zero", r, func() { r.Flush(0) }, 0, 1024, 1024, 1024)
|
||||
|
||||
// Flushing all cached input truncates the cache.
|
||||
// store | 64 |
|
||||
// buffer | 64 |
|
||||
assertCache(t, "flush full cache", r, func() { r.Flush(64) }, 0, 64, 0, 64)
|
||||
// store | 1024 |
|
||||
// buffer | 1024 |
|
||||
assertCache(t, "flush full cache", r, func() { r.Flush(1024) }, 0, 1024, 0, 1024)
|
||||
|
||||
// Reading 65 chars will allocate a new store of 2 * size + n.
|
||||
// store |xxxxx65xxxxx 128 |
|
||||
// buffer |xxxxx65xxxxx 128 |
|
||||
assertCache(t, "read cap + 1", r, func() { r.RuneAt(61) }, 0, 65+128, 65, 65+128)
|
||||
// Reading 1025 chars will allocate a new store of 2 * 1024.
|
||||
// store |xxxxx1025xxxxx 1023 |
|
||||
// buffer |xxxxx1025xxxxx 1023 |
|
||||
assertCache(t, "read cap + 1", r, func() { r.ByteAt(1024) }, 0, 2048, 1025, 2048)
|
||||
|
||||
// The bytes that we had before must be copied to the newly allocated store.
|
||||
rn, _, _ = r.RuneAt(0)
|
||||
|
@ -361,54 +357,54 @@ func TestAllocationPatterns(t *testing.T) {
|
|||
|
||||
// A partial flush frees the start of the store and moves
|
||||
// the buffer slice.
|
||||
// store | 50 x15x 128 |
|
||||
// buffer |x15x 128 |
|
||||
assertCache(t, "flush partial", r, func() { r.Flush(50) }, 0, 50+15+128, 15, 15+128)
|
||||
// store | 25 xxx1000xxx 1023 |
|
||||
// buffer |xxx1000xxx 1023 |
|
||||
assertCache(t, "flush partial", r, func() { r.Flush(25) }, 0, 2048, 1000, 2048-25)
|
||||
|
||||
// The capacity for the buffer is now 2*64 + 15
|
||||
// The capacity for the buffer is now 2023
|
||||
// This number of runes can be read, filling up the store
|
||||
// without a new allocation.
|
||||
// store | 50 xxxxxxxxx143xxxxxxxx|
|
||||
// buffer |xxxxxxxxx143xxxxxxxx|
|
||||
assertCache(t, "read fill cache after partial flush", r, func() { r.RuneAt(139) }, 0, 50+143, 143, 143)
|
||||
// store | 25 xxxxxxxxxxx2023xxxxxxxxxx|
|
||||
// buffer |xxxxxxxxxxx2023xxxxxxxxxx|
|
||||
assertCache(t, "read fill cache after partial flush", r, func() { r.ByteAt(2022) }, 0, 2048, 2023, 2048)
|
||||
|
||||
// Flush the full input.
|
||||
// store | 193 |
|
||||
// buffer | |
|
||||
assertCache(t, "flush full cache after partial flush", r, func() { r.Flush(143) }, 0, 193, 0, 193)
|
||||
// store | 2048 |
|
||||
// buffer | 2048 |
|
||||
assertCache(t, "flush full cache after partial flush", r, func() { r.Flush(2023) }, 0, 2048, 0, 2048)
|
||||
|
||||
// Read a bit more than half the capacity.
|
||||
// store |xxxxxx101xxxxxxxx 92 |
|
||||
// buffer |xxxxxx101xxxxxxxx 92 |
|
||||
assertCache(t, "read more than half the cap", r, func() { r.RuneAt(97) }, 0, 193, 101, 193)
|
||||
// store |xxxx1025xxxxxx 1023 |
|
||||
// buffer |xxxx1025xxxxxx 1023 |
|
||||
assertCache(t, "read more than half the cap", r, func() { r.ByteAt(1024) }, 0, 2048, 1025, 2048)
|
||||
|
||||
// Then flush almost all input.
|
||||
// store | 100 x1x 92 |
|
||||
// buffer |x1x 92 |
|
||||
assertCache(t, "flush almost all input", r, func() { r.Flush(100) }, 0, 193, 1, 93)
|
||||
// store | 1024 x1x 1023 |
|
||||
// buffer 1024 |x1x 1023 |
|
||||
assertCache(t, "flush almost all input", r, func() { r.Flush(1024) }, 0, 2048, 1, 1024)
|
||||
|
||||
// Again read a bit more than half the capacity. This does not fit at the
|
||||
// end of the store, but by moving the current buffer to the start of the
|
||||
// store (where it fits), space is freed up for the read operation.
|
||||
// store |xxxxx100xxxxxx 93 |
|
||||
// buffer |xxxxx100xxxxxx 93 |
|
||||
assertCache(t, "read beyond cap with free space at start of store", r, func() { r.RuneAt(96) }, 0, 193, 100, 193)
|
||||
// store |xxxxx1025xxxxxx 1023 |
|
||||
// buffer |xxxxx1025xxxxxx 1023 |
|
||||
assertCache(t, "read beyond cap with free space at start of store", r, func() { r.ByteAt(1024) }, 0, 2048, 1025, 2048)
|
||||
|
||||
// Now flush only one rune from the cache.
|
||||
// store |1 xxxx99xxxxx 93 |
|
||||
// buffer |xxxx99xxxxx 93 |
|
||||
assertCache(t, "flush 1", r, func() { r.Flush(1) }, 0, 193, 99, 192)
|
||||
// store |1 xxx1024xxxxxx 1023 |
|
||||
// buffer |xxx1024xxxxxx 1023 |
|
||||
assertCache(t, "flush 1", r, func() { r.Flush(1) }, 0, 2048, 1024, 2047)
|
||||
|
||||
// Now read one more than the capacity. This will not fit, so space has
|
||||
// to be made. Since there's 1 free space at the start of the store,
|
||||
// the data is moved to the start and no reallocation is needed.
|
||||
// store |1 xxxx99xxxxx 93 |
|
||||
// buffer |xxxx99xxxxx 93 |
|
||||
assertCache(t, "read 1 more than cap with 1 free at start", r, func() { r.RuneAt(189) }, 0, 193, 193, 193)
|
||||
// Now read the full available capacity. This will not fit, so
|
||||
// space has to be made. Since there's 1 free space at the start of the store,
|
||||
// the data are moved to the start and no reallocation is needed.
|
||||
// store |xxxxxxxxxxxx2048xxxxxxxxxxxxx|
|
||||
// buffer |xxxxxxxxxxxx2048xxxxxxxxxxxxx|
|
||||
assertCache(t, "read full capacity with 1 free byte at start", r, func() { r.ByteAt(2047) }, 0, 2048, 2048, 2048)
|
||||
}
|
||||
|
||||
func makeLargeStubReader() (*StubReader, int) {
|
||||
size := utf8.UTFMax * 64 * 5
|
||||
size := 8192
|
||||
bytes := make([]byte, size)
|
||||
for i := range bytes {
|
||||
bytes[i] = 'X'
|
||||
|
|
Loading…
Reference in New Issue