Hi 琴师,

The exception stack shows org.apache.flink.table.api.ValidationException: A tumble window expects a size value literal, which points at the code defining the tumble window. In Tumble.over("2.secends"), "secends" is a typo for "seconds", so the string is not recognized as a time interval literal and the window size fails validation.
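Something like the following should at least get past the size-literal check. This is only a minimal sketch based on the code in your mail (it reuses your register_rides_source / register_rides_sink functions, and id.max is a placeholder aggregate; adjust it to what you actually need). Note also that after group_by("w") the select() can only reference group keys, aggregates, and window properties such as w.start / w.end, so selecting the plain id and time1 fields would likely be rejected as the next error:

    from pyflink.datastream import StreamExecutionEnvironment
    from pyflink.table import StreamTableEnvironment
    from pyflink.table.window import Tumble

    s_env = StreamExecutionEnvironment.get_execution_environment()
    s_env.set_parallelism(1)
    st_env = StreamTableEnvironment.create(s_env)

    # Your DDL functions from the mail below (the source declares a WATERMARK
    # on time1, which makes time1 a rowtime attribute the window can group on).
    register_rides_source(st_env)
    register_rides_sink(st_env)

    st_env.from_path("source1") \
        .window(Tumble.over("2.seconds").on("time1").alias("w")) \
        .group_by("w") \
        .select("id.max as id, w.start as time1, w.end as time2") \
        .insert_into("sink1")

    st_env.execute("2-from_kafka_to_kafka")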
Best, Shuiqiang

奇怪的不朽琴师 <[email protected]> wrote on Wed, Jul 15, 2020 at 10:27 AM:

> Hello:
>
> I changed the source as you suggested, but now it reports a new error. What is causing this? I have been trying to debug a window for a while without success; please help me, thanks.
>
> Traceback (most recent call last):
>   File "tou.py", line 71, in <module>
>     from_kafka_to_kafka_demo()
>   File "tou.py", line 21, in from_kafka_to_kafka_demo
>     .select(" id, time1 , time1 ")\
>   File "/usr/local/lib/python3.7/site-packages/pyflink/table/table.py", line 907, in select
>     return Table(self._j_table.select(fields), self._t_env)
>   File "/usr/local/lib/python3.7/site-packages/py4j/java_gateway.py", line 1286, in __call__
>     answer, self.gateway_client, self.target_id, self.name)
>   File "/usr/local/lib/python3.7/site-packages/pyflink/util/exceptions.py", line 147, in deco
>     return f(*a, **kw)
>   File "/usr/local/lib/python3.7/site-packages/py4j/protocol.py", line 328, in get_return_value
>     format(target_id, ".", name), value)
> py4j.protocol.Py4JJavaError: An error occurred while calling o26.select.
> : org.apache.flink.table.api.ValidationException: A tumble window expects a size value literal.
>     at org.apache.flink.table.operations.utils.AggregateOperationFactory.getAsValueLiteral(AggregateOperationFactory.java:384)
>     at org.apache.flink.table.operations.utils.AggregateOperationFactory.validateAndCreateTumbleWindow(AggregateOperationFactory.java:302)
>     at org.apache.flink.table.operations.utils.AggregateOperationFactory.createResolvedWindow(AggregateOperationFactory.java:236)
>     at org.apache.flink.table.operations.utils.OperationTreeBuilder.windowAggregate(OperationTreeBuilder.java:250)
>     at org.apache.flink.table.api.internal.TableImpl$WindowGroupedTableImpl.select(TableImpl.java:794)
>     at org.apache.flink.table.api.internal.TableImpl$WindowGroupedTableImpl.select(TableImpl.java:781)
>     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>     at java.lang.reflect.Method.invoke(Method.java:498)
>     at org.apache.flink.api.python.shaded.py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
>     at org.apache.flink.api.python.shaded.py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
>     at org.apache.flink.api.python.shaded.py4j.Gateway.invoke(Gateway.java:282)
>     at org.apache.flink.api.python.shaded.py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
>     at org.apache.flink.api.python.shaded.py4j.commands.CallCommand.execute(CallCommand.java:79)
>     at org.apache.flink.api.python.shaded.py4j.GatewayConnection.run(GatewayConnection.java:238)
>     at java.lang.Thread.run(Thread.java:748)
>
> def register_rides_source(st_env):
>     source_ddl = \
>     """
>     create table source1(
>         id int,
>         time1 timestamp,
>         type string,
>         WATERMARK FOR time1 as time1 - INTERVAL '2' SECOND
>     ) with (
>         'connector.type' = 'kafka',
>         'update-mode' = 'append',
>         'connector.topic' = 'tp1',
>         'connector.properties.bootstrap.servers' = 'localhost:9092',
>         'connector.properties.zookeeper.connect' = 'localhost:2181',
>         'format.type' = 'json',
>         'format.derive-schema' = 'true',
>         'connector.version' = 'universal'
>     )
>     """
>     st_env.sql_update(source_ddl)
>
>     s_env = StreamExecutionEnvironment.get_execution_environment()
>     s_env.set_parallelism(1)
>
>     st_env = StreamTableEnvironment.create(s_env)
>
>     register_rides_source(st_env)
>     register_rides_sink(st_env)
>     st_env.from_path("source1")\
>         .window(Tumble.over("2.secends").on("time1").alias("w")) \
>         .group_by("w") \
>         .select(" id, time1 , time1 ")\
>         .insert_into("sink1")
>
>     st_env.execute("2-from_kafka_to_kafka")
>
> The code is as above.
>
> ------------------ Original message ------------------
> From: "user-zh" <[email protected]>
> Sent: Friday, July 10, 2020, 9:17 AM
> To: "user-zh" <[email protected]>
> Subject: Re: pyflink1.11.0window
>
> Hi 琴师,
>
> Does your source DDL declare time1 as a time attribute?
> create table source1(
>     id int,
>     time1 timestamp,
>     type string,
>     WATERMARK FOR time1 as time1 - INTERVAL '2' SECOND
> ) with (...)
>
> 奇怪的不朽琴师 <[email protected]> wrote on Fri, Jul 10, 2020 at 8:43 AM:
>
> > ------------------ Original message ------------------
> > From: "奇怪的不朽琴师" <[email protected]>
> > Sent: Thursday, July 9, 2020, 5:08 PM
> > To: "godfrey he" <[email protected]>
> > Subject: pyflink1.11.0window
> >
> > Hello:
> > With pyflink 1.11, opening a window still fails with
> > org.apache.flink.table.api.ValidationException: A group window expects a time attribute for grouping in a stream environment.
> >
> > Has this issue not been fixed, or am I using it the wrong way? If my usage is wrong, could you provide a correct example?
> > The code is below.
> > Thanks.
> >
> > def from_kafka_to_kafka_demo():
> >     s_env = StreamExecutionEnvironment.get_execution_environment()
> >     s_env.set_parallelism(1)
> >
> >     # use blink table planner
> >     st_env = StreamTableEnvironment.create(s_env)
> >
> >     # register source and sink
> >     register_rides_source(st_env)
> >     register_rides_sink(st_env)
> >
> >     st_env.from_path("source1")\
> >         .window(Tumble.over("1.secends").on("time1").alias("w")) \
> >         .group_by("w") \
> >         .select(" id, time1 , time1 ")\
> >         .insert_into("sink1")
> >
> >     st_env.execute("2-from_kafka_to_kafka")
> >
> > def register_rides_source(st_env):
> >     source_ddl = \
> >     '''
> >     create table source1(
> >         id int,
> >         time1 timestamp,
> >         type string
> >     ) with (
> >         'connector.type' = 'kafka',
> >         'update-mode' = 'append',
> >         'connector.topic' = 'tp1',
> >         'connector.properties.bootstrap.servers' = 'localhost:9092'
> >     )
> >     '''
> >     st_env.sql_update(source_ddl)
> >
> > def register_rides_sink(st_env):
> >     sink_ddl = \
> >     '''
> >     create table sink1(
> >         id int,
> >         time1 timestamp,
> >         time2 timestamp
> >     ) with (
> >         'connector.type' = 'kafka',
> >         'update-mode' = 'append',
> >         'connector.topic' = 'tp3',
> >         'connector.properties.bootstrap.servers' = 'localhost:9092'
> >     )
> >     '''
> >     st_env.sql_update(sink_ddl)
> >
> > if __name__ == '__main__':
> >     from_kafka_to_kafka_demo()
