latency_codec.py
Created by openaiops at 07a0fc69
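"""Multi-dimensional latency codec.

Each latency value is split into up to `max_latency_dims` base-10 digits
(least significant first), and every digit is rescaled from [0, 10] to
[-1, 1] to form the `codec` features.  A boolean `onehot` array marks, for
each value, the highest digit slot it occupies (the first slot for values
below 10).
"""
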
from typing import *

import numpy as np

from tracegnn.constants import *

if not USE_MULTI_DIM_LATENCY_CODEC:
    __all__ = []
else:
    __all__ = [
        'encode_multi_latency',
        'decode_multi_latency',
        'encode_latency',
        'decode_latency',
    ]

EPS = 1e-6


def encode_multi_latency(latencies: Sequence[np.ndarray],
                         max_latency_dims: int
                         ) -> Tuple[np.ndarray, np.ndarray]:
    """
    Encode multiple latencies into (codec, onehot) feature vectors.

    If `max_latency_dims` is sufficient:

    >>> latencies = [np.array([0.0, 9.6, 10.3, 58.7, 101.2]), np.array([11.3, 0.6, 0.0, 99.1, 100.0])]
    >>> codec, onehot = encode_multi_latency(latencies, 3)
    >>> codec
    array([[-1.  , -1.  , -1.  , -0.74, -0.8 , -1.  ],
           [ 0.92, -1.  , -1.  , -0.88, -1.  , -1.  ],
           [-0.94, -0.8 , -1.  , -1.  , -1.  , -1.  ],
           [ 0.74,  0.  , -1.  ,  0.82,  0.8 , -1.  ],
           [-0.76, -1.  , -0.8 , -1.  , -1.  , -0.8 ]])
    >>> onehot
    array([[ True, False, False, False,  True, False],
           [ True, False, False,  True, False, False],
           [False,  True, False,  True, False, False],
           [False,  True, False, False,  True, False],
           [False, False,  True, False, False,  True]])
    >>> decode_multi_latency(codec, onehot, 3)
    [array([  0. ,   9.6,  10.3,  58.7, 101.2]), array([ 11.3,   0.6,   0. ,  99.1, 100. ])]

    If `max_latency_dims` is partially sufficient:

    >>> latencies = [np.array([9.6, 10.3, 58.7, 101.2]), np.array([11.3, 0.6, 99.1, 100.0])]
    >>> codec, onehot = encode_multi_latency(latencies, 2)
    >>> codec
    array([[ 0.92, -1.  , -0.74, -0.8 ],
           [-0.94, -0.8 , -0.88, -1.  ],
           [ 0.74,  0.  ,  0.82,  0.8 ],
           [-0.76,  1.  , -1.  ,  1.  ]])
    >>> onehot
    array([[ True, False, False,  True],
           [False,  True,  True, False],
           [False,  True, False,  True],
           [False,  True, False,  True]])
    >>> decode_multi_latency(codec, onehot, 2)
    [array([  9.6,  10.3,  58.7, 101.2]), array([ 11.3,   0.6,  99.1, 100. ])]

    If `max_latency_dims` is insufficient:

    >>> latencies = [np.array([9.6, 10.3, 58.7, 101.2]), np.array([11.3, 0.6, 99.1, 100.0])]
    >>> codec, onehot = encode_multi_latency(latencies, 1)
    >>> codec
    array([[ 0.92,  1.26],
           [ 1.06, -0.88],
           [10.74, 18.82],
           [19.24, 19.  ]])
    >>> onehot
    array([[ True,  True],
           [ True,  True],
           [ True,  True],
           [ True,  True]])
    >>> decode_multi_latency(codec, onehot, 1)
    [array([  9.6,  10.3,  58.7, 101.2]), array([ 11.3,   0.6,  99.1, 100. ])]
    """
    codec, onehot = [], []
    for residual in latencies:
        # Peel off base-10 digits from the least significant position upwards.
        for i in range(max_latency_dims - 1):
            if i == 0:
                # The first flag marks values that fit entirely within one digit.
                onehot.append(residual < 10)
            else:
                # Later flags mark the digit slot where the value tops out.
                onehot.append(np.logical_and(EPS < residual, residual < 10))
            r = residual % 10
            codec.append(r)
            residual = (residual - r) / 10
        # Whatever remains (possibly >= 10 if `max_latency_dims` is insufficient)
        # goes into the last slot.
        onehot.append(EPS < residual)
        codec.append(residual)
    codec, onehot = np.stack(codec, axis=-1), np.stack(onehot, axis=-1)
    codec = codec / 5. - 1  # scale each digit from [0, 10] to [-1, 1]
    return codec, onehot


def decode_multi_latency(codec: np.ndarray,
                         onehot: np.ndarray,
                         max_latency_dims: int
                         ) -> List[np.ndarray]:
    if codec.shape[-1] % max_latency_dims != 0:
        raise ValueError(
            f'codec.shape[-1] % max_latency_dims != 0: '
            f'codec.shape = {codec.shape!r}, where max_latency_dims = {max_latency_dims!r}'
        )
    ret = []
    codec = (np.clip(codec, -1, 1) + 1) * 5  # scale each digit back from [-1, 1] to [0, 10]
    for i in range(codec.shape[-1] // max_latency_dims):
        left = i * max_latency_dims
        right = left + max_latency_dims - 1
        # Start from the most significant digit, masked by its onehot flag, then
        # repeatedly multiply by 10 and accumulate the lower digits.
        m = onehot[..., right].copy()  # copy, so `|=` below does not mutate the caller's array
        r = codec[..., right] * m.astype(np.float32)
        while right > left:
            r = r * 10
            right -= 1
            m |= onehot[..., right]
            r += codec[..., right]
        ret.append(r)
    return ret


def encode_latency(latency: np.ndarray,
                   max_latency_dims: int
                   ) -> Tuple[np.ndarray, np.ndarray]:
    return encode_multi_latency([latency], max_latency_dims)


def decode_latency(codec: np.ndarray,
                   onehot: np.ndarray,
                   max_latency_dims: int
                   ) -> np.ndarray:
    return decode_multi_latency(codec, onehot, max_latency_dims)[0]
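

if __name__ == '__main__':
    # A minimal round-trip sketch, assuming `tracegnn.constants` defines
    # `USE_MULTI_DIM_LATENCY_CODEC` so the module imports cleanly (the
    # functions themselves are defined either way; only `__all__` depends
    # on the flag).  The example values mirror the docstring of
    # `encode_multi_latency`.
    example = [np.array([0.0, 9.6, 10.3, 58.7, 101.2]),
               np.array([11.3, 0.6, 0.0, 99.1, 100.0])]
    enc, flags = encode_multi_latency(example, max_latency_dims=3)
    dec = decode_multi_latency(enc, flags, max_latency_dims=3)
    for original, recovered in zip(example, dec):
        assert np.allclose(original, recovered)
    print('round-trip ok:', [np.round(x, 3) for x in dec])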