piiswrong closed pull request #9409: Random seed setting
URL: https://github.com/apache/incubator-mxnet/pull/9409
This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:
diff --git a/src/common/random_generator.cu b/src/common/random_generator.cu
index 66969fe89c..f6f31cf5af 100644
--- a/src/common/random_generator.cu
+++ b/src/common/random_generator.cu
@@ -55,6 +55,7 @@ void RandGenerator<gpu, float>::Seed(mshadow::Stream<gpu> *s, uint32_t seed) {
           states_,
           RandGenerator<gpu, float>::kNumRandomStates,
           seed);
+  s->Wait();
 }
 }  // namespace random
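
For context: the added s->Wait() makes the asynchronous GPU seed-kernel launch finish before mx.random.seed() returns, so a random op issued immediately afterwards cannot run against partially re-seeded states. Below is a minimal sketch of the guarantee this is meant to provide at the Python level; it is not part of the patch, the shape and the low/high values are illustrative, and np.array_equal stands in for the test suite's same() helper:

    import mxnet as mx
    import numpy as np

    def draw(seed, ctx=mx.context.current_context()):
        # Re-seeding immediately followed by sampling must be reproducible; on a
        # GPU context this depends on the seed kernel having completed before
        # the sampling kernel is launched.
        mx.random.seed(seed)
        return mx.nd.random.uniform(-1.5, 3.0, shape=(200, 200), ctx=ctx).asnumpy()

    assert np.array_equal(draw(1234), draw(1234)), \
        "the same seed should reproduce the same samples"
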
diff --git a/tests/python/unittest/test_random.py b/tests/python/unittest/test_random.py
index 0efe8e6834..d0c1798eb4 100644
--- a/tests/python/unittest/test_random.py
+++ b/tests/python/unittest/test_random.py
@@ -122,7 +122,7 @@ def check_with_device(device, dtype):
         ret1 = ndop(**params).asnumpy()
         mx.random.seed(128)
         ret2 = ndop(**params).asnumpy()
-        assert device.device_type == 'gpu' or same(ret1, ret2), \
+        assert same(ret1, ret2), \
                 "ndarray test: `%s` should give the same result with the same seed" % name
         for check_name, check_func, tol in symbdic['checks']:
@@ -135,7 +135,7 @@ def check_with_device(device, dtype):
         ret1 = ndop(**params).asnumpy()
         mx.random.seed(128)
         ret2 = ndop(**params).asnumpy()
-        assert device.device_type == 'gpu' or same(ret1, ret2), \
+        assert same(ret1, ret2), \
                 "ndarray test: `%s` should give the same result with the same seed" % name
         for i in range(2):
             for j in range(2):
@@ -161,7 +161,7 @@ def check_with_device(device, dtype):
         mx.random.seed(128)
         yexec.forward()
         un2 = (yexec.outputs[0] - x).copyto(device)
-        assert device.device_type == 'gpu' or same(un1.asnumpy(), un2.asnumpy()), \
+        assert same(un1.asnumpy(), un2.asnumpy()), \
                 "symbolic test: `%s` should give the same result with the same seed" % name
         ret1 = un1.asnumpy()
@@ -197,6 +197,76 @@ def test_random():
     check_with_device(mx.context.current_context(), 'float64')
+# Set seed variously based on `start_seed` and `num_init_seeds`, then set seed finally to `final_seed`
+def set_seed_variously(init_seed, num_init_seeds, final_seed):
+    end_seed = init_seed + num_init_seeds
+    for seed in range(init_seed, end_seed):
+        mx.random.seed(seed)
+    mx.random.seed(final_seed)
+    return end_seed
+
+# Tests that seed setting of std (non-parallel) rng is synchronous w.r.t. rng use before and after.
+def test_random_seed_setting():
+    ctx = mx.context.current_context()
+    seed_to_test = 1234
+    num_temp_seeds = 25
+    probs = [0.125, 0.25, 0.25, 0.0625, 0.125, 0.1875]
+    num_samples = 100000
+    for dtype in ['float16', 'float32', 'float64']:
+        seed = set_seed_variously(1, num_temp_seeds, seed_to_test)
+        samples1 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),
+                                            shape=num_samples)
+        seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
+        samples2 = mx.nd.random.multinomial(data=mx.nd.array(probs, ctx=ctx, dtype=dtype),
+                                            shape=num_samples)
+        samples1np = samples1.asnumpy()
+        set_seed_variously(seed, num_temp_seeds, seed_to_test+1)
+        samples2np = samples2.asnumpy()
+        assert same(samples1np, samples2np), \
+            "seed-setting test: `multinomial` should give the same result with the same seed"
+
+
+# Tests that seed setting of parallel rng is synchronous w.r.t. rng use before and after.
+def test_parallel_random_seed_setting():
+    ctx = mx.context.current_context()
+    seed_to_test = 1234
+    for dtype in ['float16', 'float32', 'float64']:
+        # Avoid excessive test cpu runtimes
+        num_temp_seeds = 25 if ctx.device_type == 'gpu' else 1
+        # To flush out a possible race condition, run multiple times
+        for _ in range(20):
+            # Create enough samples such that we get a meaningful distribution.
+            shape = (200, 200)
+            params = { 'low': -1.5, 'high': 3.0 }
+            params.update(shape=shape, dtype=dtype, ctx=ctx)
+
+            # check directly
+            seed = set_seed_variously(1, num_temp_seeds, seed_to_test)
+            ret1 = mx.nd.random.uniform(**params)
+            seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
+            ret2 = mx.nd.random.uniform(**params)
+            seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
+            assert same(ret1.asnumpy(), ret2.asnumpy()), \
+                "ndarray seed-setting test: `uniform` should give the same result with the same seed"
+
+            # check symbolic
+            X = mx.sym.Variable("X")
+            Y = mx.sym.random.uniform(**params) + X
+            x = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)
+            xgrad = mx.nd.zeros(shape, dtype=dtype, ctx=ctx)
+            yexec = Y.bind(ctx, {'X' : x}, {'X': xgrad})
+            seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
+            yexec.forward(is_train=True)
+            yexec.backward(yexec.outputs[0])
+            un1 = (yexec.outputs[0] - x).copyto(ctx)
+            seed = set_seed_variously(seed, num_temp_seeds, seed_to_test)
+            yexec.forward()
+            set_seed_variously(seed, num_temp_seeds, seed_to_test)
+            un2 = (yexec.outputs[0] - x).copyto(ctx)
+            assert same(un1.asnumpy(), un2.asnumpy()), \
+                "symbolic seed-setting test: `uniform` should give the same result with the same seed"
+
+
 def test_sample_multinomial():
     x = mx.nd.array([[0,1,2,3,4],[4,3,2,1,0]])/10.0
     dx = mx.nd.ones_like(x)
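
A detail worth noting in the tests above: the seed is deliberately changed between issuing a sampling op and reading it back with asnumpy(), so the assertions also pin down that a later mx.random.seed() cannot retroactively disturb an op that was already queued on the engine. A condensed sketch of that ordering property follows; the shape and seed values are illustrative, and np.array_equal stands in for same():

    import mxnet as mx
    import numpy as np

    mx.random.seed(1234)
    a = mx.nd.random.uniform(shape=(200, 200))  # issued asynchronously
    mx.random.seed(4321)                        # re-seed before `a` is read back
    a_np = a.asnumpy()

    mx.random.seed(1234)
    b_np = mx.nd.random.uniform(shape=(200, 200)).asnumpy()
    assert np.array_equal(a_np, b_np), \
        "re-seeding must not disturb samples from an already-issued op"
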
@@ -240,7 +310,11 @@ def test_uniform_generator():
     for dtype in ['float16', 'float32', 'float64']:
         for low, high in [(-1.0, 1.0), (1.0, 3.0)]:
             print("ctx=%s, dtype=%s, Low=%g, High=%g:" % (ctx, dtype, low, high))
-            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=high - low), 5)
+            scale = high - low
+            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)
+            # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
+            buckets = np.array(buckets, dtype=dtype).tolist()
+            probs = [(buckets[i][1] - buckets[i][0])/scale for i in range(5)]
             generator_mx = lambda x: mx.nd.random.uniform(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
             verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
             generator_mx_same_seed = \
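
The last hunk recomputes each bucket's expected probability from boundaries cast to the sampled dtype, so that float16 rounding of the bucket edges does not bias the frequency check performed by verify_generator. A standalone sketch of that adjustment is shown below; the equal-width bucket layout and the concrete low/high values are illustrative rather than taken verbatim from the test:

    import numpy as np

    low, high = -1.0, 1.0
    scale = high - low
    # Five equal-probability buckets over [low, high] before quantization.
    edges = np.linspace(low, high, num=6)
    buckets = [[edges[i], edges[i + 1]] for i in range(5)]

    # Cast the bucket boundaries to the sampled dtype (here float16) and
    # recompute each bucket's probability from its quantized width, as the
    # updated test does.
    buckets = np.array(buckets, dtype='float16').tolist()
    probs = [(buckets[i][1] - buckets[i][0]) / scale for i in range(5)]
    assert abs(sum(probs) - 1.0) < 1e-3
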
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services