Pārlūkot izejas kodu

优化写入 maxcompute

wcc 3 gadi atpakaļ
vecāks
revīzija
a865f57208
27 mainīti faili ar 1607 papildinājumiem un 465 dzēšanām
  1. 213 0
      flink-ad-monitoring/dependency-reduced-pom.xml
  2. 1 1
      flink-ad-monitoring/pom.xml
  3. 78 57
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/KafkaDemoJob.java
  4. 0 11
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/MaxComputeParams.java
  5. 0 89
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/MaxComputeWriter.java
  6. 52 36
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/bean/BeanUtil.java
  7. 1 1
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/bean/DataTypeEnum.java
  8. 30 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/bean/annotation/MaxComputeColumn.java
  9. 10 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/bean/annotation/MaxComputeTable.java
  10. 0 93
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/MaxComputeSink.java
  11. 0 115
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/MaxComputeSinkBuffer.java
  12. 97 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/TunnelBatchSink.java
  13. 156 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/TunnelBatchSinkBuffer.java
  14. 92 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/TunnelBatchWriter.java
  15. 271 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/TunnelStreamSink.java
  16. 42 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/dto/AdDataOfDayDTO.java
  17. 6 1
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/dto/AdDataOfMinuteDTO.java
  18. 508 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/entity/AdDataOfDayODS.java
  19. 11 1
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/entity/AdDataOfMinuteODS.java
  20. 9 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/properties/ApplicationProperties.java
  21. 10 0
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/properties/KafkaProperties.java
  22. 1 3
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/ObjectUtil.java
  23. 0 15
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/bean/DataMapping.java
  24. 0 42
      flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/bean/FieldInfo.java
  25. 7 0
      flink-ad-monitoring/src/main/resources/ad_stream_of_day.properties
  26. 7 0
      flink-ad-monitoring/src/main/resources/ad_stream_of_minute.properties
  27. 5 0
      flink-ad-monitoring/src/main/resources/application.properties

+ 213 - 0
flink-ad-monitoring/dependency-reduced-pom.xml

@@ -0,0 +1,213 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>flink.zanxiangnet.ad.monitoring</groupId>
+  <artifactId>flink-ad-monitoring</artifactId>
+  <name>Flink Walkthrough DataStream Java</name>
+  <version>1.0-SNAPSHOT</version>
+  <url>https://flink.apache.org</url>
+  <build>
+    <pluginManagement>
+      <plugins>
+        <plugin>
+          <groupId>org.eclipse.m2e</groupId>
+          <artifactId>lifecycle-mapping</artifactId>
+          <version>1.0.0</version>
+          <configuration>
+            <lifecycleMappingMetadata>
+              <pluginExecutions>
+                <pluginExecution>
+                  <pluginExecutionFilter>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-shade-plugin</artifactId>
+                    <versionRange>[3.0.0,)</versionRange>
+                    <goals>
+                      <goal>shade</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <ignore />
+                  </action>
+                </pluginExecution>
+                <pluginExecution>
+                  <pluginExecutionFilter>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-compiler-plugin</artifactId>
+                    <versionRange>[3.1,)</versionRange>
+                    <goals>
+                      <goal>testCompile</goal>
+                      <goal>compile</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <ignore />
+                  </action>
+                </pluginExecution>
+              </pluginExecutions>
+            </lifecycleMappingMetadata>
+          </configuration>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <configuration>
+          <source>${target.java.version}</source>
+          <target>${target.java.version}</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-shade-plugin</artifactId>
+        <version>3.0.0</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <artifactSet>
+                <excludes>
+                  <exclude>org.apache.flink:flink-shaded-force-shading</exclude>
+                  <exclude>com.google.code.findbugs:jsr305</exclude>
+                  <exclude>org.slf4j:*</exclude>
+                  <exclude>org.apache.logging.log4j:*</exclude>
+                </excludes>
+              </artifactSet>
+              <filters>
+                <filter>
+                  <artifact>*:*</artifact>
+                  <excludes>
+                    <exclude>META-INF/*.SF</exclude>
+                    <exclude>META-INF/*.DSA</exclude>
+                    <exclude>META-INF/*.RSA</exclude>
+                  </excludes>
+                </filter>
+              </filters>
+              <transformers>
+                <transformer>
+                  <mainClass>flink.zanxiangnet.ad.monitoring.KafkaDemoJob</mainClass>
+                </transformer>
+              </transformers>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <repositories>
+    <repository>
+      <releases>
+        <enabled>false</enabled>
+      </releases>
+      <snapshots />
+      <id>apache.snapshots</id>
+      <name>Apache Development Snapshot Repository</name>
+      <url>https://repository.apache.org/content/repositories/snapshots/</url>
+    </repository>
+  </repositories>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.flink</groupId>
+      <artifactId>flink-streaming-java_2.11</artifactId>
+      <version>1.14.0</version>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <artifactId>flink-core</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>flink-file-sink-common</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>flink-runtime</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>flink-scala_2.11</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>flink-java</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>flink-shaded-guava</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>commons-math3</artifactId>
+          <groupId>org.apache.commons</groupId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.flink</groupId>
+      <artifactId>flink-clients_2.11</artifactId>
+      <version>1.14.0</version>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <artifactId>flink-optimizer</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>commons-cli</artifactId>
+          <groupId>commons-cli</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>flink-core</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>flink-runtime</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>flink-java</artifactId>
+          <groupId>org.apache.flink</groupId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <version>2.14.1</version>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-api</artifactId>
+      <version>2.14.1</version>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-core</artifactId>
+      <version>2.14.1</version>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.projectlombok</groupId>
+      <artifactId>lombok</artifactId>
+      <version>1.18.2</version>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+  <properties>
+    <odps.version>0.37.10-public</odps.version>
+    <flink.version>1.14.0</flink.version>
+    <target.java.version>1.8</target.java.version>
+    <maven.compiler.target>${target.java.version}</maven.compiler.target>
+    <scala.binary.version>2.11</scala.binary.version>
+    <log4j.version>2.14.1</log4j.version>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <jackson.version>2.13.0</jackson.version>
+    <maven.compiler.source>${target.java.version}</maven.compiler.source>
+  </properties>
+</project>
+

+ 1 - 1
flink-ad-monitoring/pom.xml

@@ -30,7 +30,7 @@ under the License.
 
     <properties>
         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-        <flink.version>1.14.0</flink.version>
+        <flink.version>1.13.0</flink.version>
         <target.java.version>1.8</target.java.version>
         <scala.binary.version>2.11</scala.binary.version>
         <maven.compiler.source>${target.java.version}</maven.compiler.source>

+ 78 - 57
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/KafkaDemoJob.java

@@ -1,112 +1,105 @@
 package flink.zanxiangnet.ad.monitoring;
 
-import com.aliyun.odps.Instance;
-import com.aliyun.odps.Odps;
-import com.aliyun.odps.account.Account;
-import com.aliyun.odps.account.AliyunAccount;
-import com.aliyun.odps.data.Record;
-import com.aliyun.odps.task.SQLTask;
+import com.tencent.ads.model.DailyReportsGetListStruct;
 import com.tencent.ads.model.HourlyReportsGetListStruct;
-import flink.zanxiangnet.ad.monitoring.maxcompute.sink.MaxComputeSink;
-import flink.zanxiangnet.ad.monitoring.pojo.AdInfo;
-import flink.zanxiangnet.ad.monitoring.pojo.PlanInfo;
-import flink.zanxiangnet.ad.monitoring.pojo.StatInfo;
+import flink.zanxiangnet.ad.monitoring.maxcompute.sink.TunnelBatchSink;
+import flink.zanxiangnet.ad.monitoring.maxcompute.sink.TunnelStreamSink;
+import flink.zanxiangnet.ad.monitoring.pojo.dto.AdDataOfDayDTO;
+import flink.zanxiangnet.ad.monitoring.pojo.entity.AdDataOfDayODS;
+import flink.zanxiangnet.ad.monitoring.pojo.properties.KafkaProperties;
 import flink.zanxiangnet.ad.monitoring.pojo.dto.AdDataOfMinuteDTO;
 import flink.zanxiangnet.ad.monitoring.pojo.entity.AdDataOfMinuteODS;
-import flink.zanxiangnet.ad.monitoring.pojo.entity.AdStatOfHourDWD;
 import flink.zanxiangnet.ad.monitoring.util.DateUtil;
 import flink.zanxiangnet.ad.monitoring.util.JsonUtil;
-import flink.zanxiangnet.ad.monitoring.util.NumberUtil;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
 import org.apache.flink.api.common.eventtime.WatermarkStrategy;
-import org.apache.flink.api.common.functions.FilterFunction;
-import org.apache.flink.api.common.functions.ReduceFunction;
-import org.apache.flink.api.common.functions.RichMapFunction;
-import org.apache.flink.api.common.functions.RichReduceFunction;
 import org.apache.flink.api.common.serialization.SimpleStringSchema;
-import org.apache.flink.api.java.functions.KeySelector;
 import org.apache.flink.configuration.Configuration;
 import org.apache.flink.connector.kafka.source.KafkaSource;
 import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
 import org.apache.flink.streaming.api.datastream.DataStreamSource;
 import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
-import org.apache.flink.streaming.api.functions.sink.SinkFunction;
-import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
-import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
-import org.apache.flink.streaming.api.windowing.time.Time;
-import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
-import org.apache.flink.util.Collector;
 import org.apache.kafka.clients.CommonClientConfigs;
+import org.apache.kafka.clients.consumer.OffsetResetStrategy;
 import org.apache.kafka.common.config.SaslConfigs;
 import org.apache.kafka.common.config.SslConfigs;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.springframework.beans.BeanUtils;
 
 import java.time.Duration;
 import java.time.LocalDateTime;
+import java.util.Date;
 import java.util.Properties;
 
 public class KafkaDemoJob {
 
     public static void main(String[] args) throws Exception {
         System.setProperty("javax.net.ssl.trustStore", "D:\\Downloads\\kafka.client.truststore.jks");
+        // System.setProperty("javax.net.ssl.trustStore", "/root/flink-1.13.2/kafka.client.truststore.jks");
         System.setProperty("javax.net.ssl.trustStorePassword", "KafkaOnsClient");
         StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
 
+        // 加载配置文件到 flink的全局配置中
         Properties props = new Properties();
-        // props.put(SaslConfigs.SASL_JAAS_CONFIG, "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"alikafka_pre-cn-tl32fsx4l00x\" password=\"VOEdhZLjOrL76lrl5bqPtydtoEkbs0Ny\";");
-        props.put(SaslConfigs.SASL_JAAS_CONFIG, "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"alikafka_pre-cn-tl32fsx4l00x\" password=\"VOEdhZLjOrL76lrl5bqPtydtoEkbs0Ny\";");
-        props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "D:\\Downloads\\kafka.client.truststore.jks");
-        props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "KafkaOnsClient");
-        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
-        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
-        props.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
-        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
-                .setBootstrapServers("114.55.59.94:9093,112.124.33.132:9093")
-                .setTopics("ad_cost_topic")
-                .setGroupId("ad_cost_group")
-                .setProperties(props)
-                .setStartingOffsets(OffsetsInitializer.earliest())
-                .setValueOnlyDeserializer(new SimpleStringSchema())
-                .build();
+        props.load(KafkaDemoJob.class.getResourceAsStream("/application.properties"));
+        Configuration configuration = new Configuration();
+        props.stringPropertyNames().forEach(key -> {
+            String value = props.getProperty(key);
+            configuration.setString(key.trim(), StringUtils.isBlank(value) ? "" : value.trim());
+        });
+        env.getConfig().setGlobalJobParameters(configuration);
 
-        /*DataStreamSource<String> in = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "Kafka Source");
+        Properties adStreamOfMinuteProps = new Properties();
+        adStreamOfMinuteProps.load(KafkaDemoJob.class.getResourceAsStream("/ad_stream_of_minute.properties"));
+        KafkaSource<String> adStreamOfMinuteSource = buildKafkaSource(adStreamOfMinuteProps);
 
-        SingleOutputStreamOperator<String> adInfo = in.filter(StringUtils::isNotBlank).map(str -> {
-            File file = new File("C:\\Users\\hi\\Desktop\\temp\\ff.txt");
-            if (!file.exists()) {
-                file.createNewFile();
-            }
-            FileWriter writer = new FileWriter(file, true);
-            writer.write(str + "\r\n");
-            writer.close();
-            return str;
-        });*/
+        DataStreamSource<String> adStreamOfMinuteIn = env.fromSource(adStreamOfMinuteSource, WatermarkStrategy.noWatermarks(), "adStreamOfMinuteSource_kafka");
 
-        DataStreamSource<String> in = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "adDataOfMinuteSource_kafka");
-
-        SingleOutputStreamOperator<AdDataOfMinuteODS> adOdsStream = in.filter(StringUtils::isNotBlank)
+        SingleOutputStreamOperator<AdDataOfMinuteODS> adMinuteOdsStream = adStreamOfMinuteIn.filter(StringUtils::isNotBlank)
                 .map(str -> {
                     AdDataOfMinuteDTO dto = JsonUtil.toObj(str, AdDataOfMinuteDTO.class);
                     LocalDateTime statTime = DateUtil.milliToLocalDateTime(dto.getCreateTime());
+                    HourlyReportsGetListStruct struct = dto.getHourlyReportsGetListStruct();
                     AdDataOfMinuteODS adOds = new AdDataOfMinuteODS();
+                    BeanUtils.copyProperties(struct, adOds);
                     adOds.setStatDay(DateUtil.formatLocalDate(statTime.toLocalDate()));
                     adOds.setHour(dto.getHourlyReportsGetListStruct().getHour().intValue());
                     adOds.setStatTime(dto.getCreateTime());
                     adOds.setAccountId(dto.getAccountId());
                     adOds.setAgencyAccountId(dto.getHourlyReportsGetListStruct().getAccountId());
-                    BeanUtils.copyProperties(dto.getHourlyReportsGetListStruct(), adOds);
                     return adOds;
                 })
                 // 打水印,延迟 12分钟,同时指定时间流
                 .assignTimestampsAndWatermarks(WatermarkStrategy.<AdDataOfMinuteODS>forBoundedOutOfOrderness(Duration.ofMinutes(12L))
                         .withTimestampAssigner((SerializableTimestampAssigner<AdDataOfMinuteODS>) (adOds, l) -> adOds.getStatTime())
                 );
+        adMinuteOdsStream.addSink(new TunnelBatchSink<>(AdDataOfMinuteODS.class, 36000L, 64000L, 3));
 
-        adOdsStream.addSink(new MaxComputeSink<>(AdDataOfMinuteODS.class, "statDay"));
+        Properties adStreamOfDayProps = new Properties();
+        adStreamOfDayProps.load(KafkaDemoJob.class.getResourceAsStream("/ad_stream_of_day.properties"));
+        KafkaSource<String> adStreamOfDaySource = buildKafkaSource(adStreamOfDayProps);
+
+        DataStreamSource<String> adStreamOfDayIn = env.fromSource(adStreamOfDaySource, WatermarkStrategy.noWatermarks(), "adStreamOfMinuteSource_kafka");
+
+        SingleOutputStreamOperator<AdDataOfDayODS> adDayOdsStream = adStreamOfDayIn.filter(StringUtils::isNotBlank)
+                .map(str -> {
+                    AdDataOfDayDTO dto = JsonUtil.toObj(str, AdDataOfDayDTO.class);
+                    Date createTime = new Date(dto.getCreateTime());
+                    DailyReportsGetListStruct struct = dto.getDailyReportsGetListStruct();
+                    AdDataOfDayODS adOds = new AdDataOfDayODS();
+                    BeanUtils.copyProperties(struct, adOds);
+                    adOds.setStatDay(struct.getDate());
+                    adOds.setAccountId(dto.getAccountId());
+                    adOds.setCampaignId(struct.getCampaignId());
+                    adOds.setAgencyAccountId(struct.getAccountId());
+                    adOds.setWechatAccountId(struct.getWechatAccountId());
+                    adOds.setAdgroupId(struct.getAdgroupId());
+                    adOds.setAdId(struct.getAdId());
+                    adOds.setCreateTime(createTime);
+                    return adOds;
+                });
+        adDayOdsStream.addSink(new TunnelBatchSink<>(AdDataOfDayODS.class, 36000L, 6000L, 200));
 
         // 获取指定广告的历史统计信息(用于统计总的消耗信息等)
         /*SingleOutputStreamOperator<AdStatOfHourDWD> oldAdStream = adOdsStream.map(new RichMapFunction<AdDataOfMinuteODS, AdStatOfHourDWD>() {
@@ -119,7 +112,7 @@ public class KafkaDemoJob {
                 // odps.setEndpoint("http://service.odps.aliyun.com/api");
                 odps.setEndpoint("http://service.cn-hangzhou.maxcompute.aliyun.com/api");
                 // http://dt.cn-hangzhou.maxcompute.aliyun.com
-                odps.setDefaultProject("ZxData_dev");
+                odps.setDefaultProject("zx_ad_monitoring");
             }
             @Override
             public AdStatOfHourDWD map(AdDataOfMinuteODS adDataOfMinuteODS) throws Exception {
@@ -272,4 +265,32 @@ public class KafkaDemoJob {
         // planStatStream.addSink(new PrintSink<>()).name("msg-print");
         env.execute();
     }
+
+    private static KafkaSource<String> buildKafkaSource(Properties props) {
+        return buildKafkaSource(props.getProperty(KafkaProperties.KAFKA_SERVERS),
+                props.getProperty(KafkaProperties.KAFKA_USERNAME),
+                props.getProperty(KafkaProperties.KAFKA_PASSWORD),
+                props.getProperty(KafkaProperties.KAFKA_SSL_PATH),
+                props.getProperty(KafkaProperties.KAFKA_TOPIC),
+                props.getProperty(KafkaProperties.KAFKA_GROUP_ID)
+        );
+    }
+
+    private static KafkaSource<String> buildKafkaSource(String servers, String username, String password, String sslPath, String topic, String groupId) {
+        Properties props = new Properties();
+        props.put(SaslConfigs.SASL_JAAS_CONFIG, "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"" + username + "\" password=\"" + password + "\";");
+        props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, sslPath);
+        props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "KafkaOnsClient");
+        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
+        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
+        props.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
+        return KafkaSource.<String>builder()
+                .setBootstrapServers(servers)
+                .setTopics(topic)
+                .setGroupId(groupId)
+                .setProperties(props)
+                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
+                .setValueOnlyDeserializer(new SimpleStringSchema())
+                .build();
+    }
 }

+ 0 - 11
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/MaxComputeParams.java

@@ -1,11 +0,0 @@
-package flink.zanxiangnet.ad.monitoring.maxcompute;
-
-public class MaxComputeParams {
-
-    public static final String ACCESS_ID = "max";
-
-    private String accessId;
-    private String accessKey;
-    private String odpsUrl;
-    private String tunnelUrl;
-}

+ 0 - 89
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/MaxComputeWriter.java

@@ -1,89 +0,0 @@
-package flink.zanxiangnet.ad.monitoring.maxcompute;
-
-import com.aliyun.odps.data.Record;
-import com.aliyun.odps.data.RecordWriter;
-import com.aliyun.odps.tunnel.TableTunnel;
-import flink.zanxiangnet.ad.monitoring.util.JsonUtil;
-import flink.zanxiangnet.ad.monitoring.util.bean.BeanUtil;
-import flink.zanxiangnet.ad.monitoring.util.bean.FieldInfo;
-import flink.zanxiangnet.ad.monitoring.util.bean.ObjectUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.util.CollectionUtils;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.util.List;
-
-/**
- * 实际执行 MaxCompute写入逻辑
- */
-public class MaxComputeWriter<T> implements Runnable {
-    private static final Logger log = LoggerFactory.getLogger(MaxComputeWriter.class);
-
-    private final TableTunnel.UploadSession uploadSession;
-    private final RecordWriter recordWriter;
-    private final Class<T> clazz;
-    private final List<T> dataList;
-
-    public MaxComputeWriter(Class<T> clazz, TableTunnel.UploadSession uploadSession, RecordWriter recordWriter, List<T> dataList) {
-        this.clazz = clazz;
-        this.uploadSession = uploadSession;
-        this.recordWriter = recordWriter;
-        this.dataList = dataList;
-    }
-
-    @Override
-    public void run() {
-        if (CollectionUtils.isEmpty(dataList)) {
-            return;
-        }
-        List<FieldInfo> fieldInfoList = BeanUtil.parseBeanField(clazz);
-        try {
-            int i = 0;
-            for(T t : dataList) {
-                Record record = uploadSession.newRecord();
-                for (FieldInfo fieldInfo : fieldInfoList) {
-                    Object obj = fieldInfo.getGetMethod().invoke(t);
-                    switch (fieldInfo.getColumnType()) {
-                        case TINYINT:
-                        case SMALLINT:
-                        case INT:
-                        case BIGINT:
-                            record.setBigint(fieldInfo.getColumnName(), ObjectUtil.toLong(obj));
-                            break;
-                        case FLOAT:
-                        case DOUBLE:
-                            record.setDouble(fieldInfo.getColumnName(), ObjectUtil.toDouble(obj));
-                            break;
-                        case DECIMAL:
-                            record.setDecimal(fieldInfo.getColumnName(), ObjectUtil.toDecimal(obj));
-                            break;
-                        case VARCHAR:
-                        case CHAR:
-                        case STRING:
-                            record.setString(fieldInfo.getColumnName(), obj == null ? null : obj.toString());
-                            break;
-                        case DATE:
-                        case TIMESTAMP:
-                        case DATETIME:
-                            record.setDatetime(fieldInfo.getColumnName(), ObjectUtil.toDate(obj));
-                            break;
-                        case BOOLEAN:
-                            record.setBoolean(fieldInfo.getColumnName(), ObjectUtil.toBoolean(obj));
-                            break;
-                        default:
-                            throw new RuntimeException("Unknown column type: " + JsonUtil.toString(fieldInfo));
-                    }
-                }
-                if (i++ < 20) {
-                    System.out.println(JsonUtil.toString(record));
-                }
-                recordWriter.write(record);
-            }
-        } catch (IOException | InvocationTargetException | IllegalAccessException e) {
-            log.error(e.getMessage(), e);
-            throw new RuntimeException("Failed writer to MaxCompute!!! Error msg: " + e.getMessage());
-        }
-    }
-}

+ 52 - 36
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/bean/BeanUtil.java → flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/bean/BeanUtil.java

@@ -1,12 +1,15 @@
-package flink.zanxiangnet.ad.monitoring.util.bean;
+package flink.zanxiangnet.ad.monitoring.maxcompute.bean;
 
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation.MaxComputeColumn;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
 import org.apache.commons.lang3.StringUtils;
 
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
-import java.math.BigDecimal;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
+import java.lang.reflect.Modifier;
 import java.util.*;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -17,6 +20,7 @@ public class BeanUtil {
 
     /**
      * 解析 javabean的字段
+     *
      * @param clazz
      * @return
      */
@@ -34,8 +38,11 @@ public class BeanUtil {
             throw new RuntimeException("Failed parsed class[" + clazz.getName() + "]. No Field!");
         }
         List<FieldInfo> fieldInfoList = new ArrayList<>(fieldList.size());
-        for(Field field : fieldList) {
-            DataMapping annotation = field.getAnnotation(DataMapping.class);
+        for (Field field : fieldList) {
+            if (Modifier.isFinal(field.getModifiers()) || Modifier.isStatic(field.getModifiers())) {
+                continue;
+            }
+            MaxComputeColumn annotation = field.getAnnotation(MaxComputeColumn.class);
             FieldInfo fieldInfo = new FieldInfo();
             fieldInfo.setField(field);
             fieldInfo.setFieldName(field.getName());
@@ -49,41 +56,16 @@ public class BeanUtil {
                 if (annotation.ignore()) {
                     continue;
                 }
-                if (StringUtils.isNotBlank(annotation.columnName())) {
-                    fieldInfo.setColumnName(annotation.columnName());
+                if (annotation.isPartitioned()) {
+                    fieldInfo.setUsePartitioned(true);
+                }
+                if (StringUtils.isNotBlank(annotation.value())) {
+                    fieldInfo.setColumnName(annotation.value());
                 } else {
                     fieldInfo.setColumnName(humpToLine(fieldInfo.getFieldName()));
                 }
-                fieldInfo.setColumnType(annotation.dataType());
             } else {
                 fieldInfo.setColumnName(humpToLine(fieldInfo.getFieldName()));
-                DataTypeEnum columnType = null;
-                if (fieldInfo.getFieldType() == Byte.class) {
-                    columnType = DataTypeEnum.INT;
-                } else if (fieldInfo.getFieldType() == Integer.class) {
-                    columnType = DataTypeEnum.INT;
-                } else if (fieldInfo.getFieldType() == Long.class) {
-                    columnType = DataTypeEnum.BIGINT;
-                } else if (fieldInfo.getFieldType() == Float.class) {
-                    columnType = DataTypeEnum.DOUBLE;
-                } else if (fieldInfo.getFieldType() == Double.class) {
-                    columnType = DataTypeEnum.DOUBLE;
-                } else if (fieldInfo.getFieldType() == BigDecimal.class) {
-                    columnType = DataTypeEnum.DECIMAL;
-                } else if (fieldInfo.getFieldType() == Date.class) {
-                    columnType = DataTypeEnum.DATETIME;
-                } else if (fieldInfo.getFieldType() == LocalDate.class) {
-                    columnType = DataTypeEnum.DATE;
-                } else if (fieldInfo.getFieldType() == LocalDateTime.class) {
-                    columnType = DataTypeEnum.DATETIME;
-                } else if (fieldInfo.getFieldType() == Boolean.class) {
-                    columnType = DataTypeEnum.BOOLEAN;
-                } else if (fieldInfo.getFieldType() == String.class) {
-                    columnType = DataTypeEnum.STRING;
-                } else {
-                    throw new RuntimeException("Failed parse class[" + clazz.getName() + "]. The field [" + field.getName() + "] type unknown!");
-                }
-                fieldInfo.setColumnType(columnType);
             }
             fieldInfoList.add(fieldInfo);
         }
@@ -92,6 +74,7 @@ public class BeanUtil {
 
     /**
      * 驼峰转下划线
+     *
      * @param str
      * @return
      */
@@ -107,6 +90,7 @@ public class BeanUtil {
 
     /**
      * 下划线转驼峰
+     *
      * @param str
      * @return
      */
@@ -132,4 +116,36 @@ public class BeanUtil {
         String methodName = "set" + fieldName.substring(0, 1).toUpperCase() + fieldName.substring(1);
         return clazz.getMethod(methodName, field.getType());
     }
+
+    @Data
+    @NoArgsConstructor
+    @AllArgsConstructor
+    @Builder
+    public static class FieldInfo {
+        /**
+         * Whether this field is used as a partition column.
+         */
+        // NOTE(review): @Builder ignores this field initializer (Lombok needs
+        // @Builder.Default); harmless here only because false is also the
+        // primitive default — confirm before changing the default value.
+        private boolean usePartitioned = false;
+        /**
+         * Java bean field name.
+         */
+        private String fieldName;
+        /**
+         * Column name in the MaxCompute table.
+         */
+        private String columnName;
+        /**
+         * The reflected field.
+         */
+        private Field field;
+        /**
+         * Getter method of the field.
+         */
+        private Method getMethod;
+
+        /**
+         * Setter method of the field.
+         */
+        private Method setMethod;
+    }
 }

+ 1 - 1
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/bean/DataTypeEnum.java → flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/bean/DataTypeEnum.java

@@ -1,4 +1,4 @@
-package flink.zanxiangnet.ad.monitoring.util.bean;
+package flink.zanxiangnet.ad.monitoring.maxcompute.bean;
 
 import lombok.Getter;
 

+ 30 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/bean/annotation/MaxComputeColumn.java

@@ -0,0 +1,30 @@
+package flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation;
+
+import java.lang.annotation.*;
+
+/**
+ * Maps a bean field to a column of a MaxCompute table.
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.FIELD, ElementType.ANNOTATION_TYPE})
+public @interface MaxComputeColumn {
+
+    /**
+     * Column name in MaxCompute. When empty, the column name is presumably
+     * derived from the field name (snake_case) — see BeanUtil.parseBeanField.
+     *
+     * @return the explicit column name, or "" to derive it
+     */
+    String value() default "";
+
+    /**
+     * Whether to ignore this field when mapping.
+     *
+     * @return true to skip the field entirely
+     */
+    boolean ignore() default false;
+
+    /**
+     * Whether this field is a partition column.
+     *
+     * @return true if the field participates in the partition spec
+     */
+    boolean isPartitioned() default false;
+}

+ 10 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/bean/annotation/MaxComputeTable.java

@@ -0,0 +1,10 @@
+package flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation;
+
+import java.lang.annotation.*;
+
+/**
+ * Declares the MaxCompute table a bean class is written to.
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE, ElementType.ANNOTATION_TYPE})
+public @interface MaxComputeTable {
+    /**
+     * MaxCompute table name.
+     */
+    String value();
+}

+ 0 - 93
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/MaxComputeSink.java

@@ -1,93 +0,0 @@
-package flink.zanxiangnet.ad.monitoring.maxcompute.sink;
-
-import com.aliyun.odps.Odps;
-import com.aliyun.odps.account.Account;
-import com.aliyun.odps.account.AliyunAccount;
-import com.aliyun.odps.tunnel.TableTunnel;
-import flink.zanxiangnet.ad.monitoring.maxcompute.MaxComputeLog;
-import flink.zanxiangnet.ad.monitoring.util.JsonUtil;
-import flink.zanxiangnet.ad.monitoring.util.bean.BeanUtil;
-import org.apache.flink.configuration.Configuration;
-import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.util.Map;
-
-public class MaxComputeSink<IN> extends RichSinkFunction<IN> {
-    private static final Logger log = LoggerFactory.getLogger(MaxComputeSink.class);
-
-    // 对象锁,防止MaxCompute的 Tunnel对象多次初始化
-    private static final Object DUMMY_LOCK = new Object();
-
-    private final Class<IN> clazz;
-    // private final Method partitionFieldMethod;
-    private final String methodName;
-
-    private volatile transient MaxComputeSinkBuffer<IN> sinkBuffer;
-
-    public MaxComputeSink(Class<IN> clazz, String partitionFieldName) {
-        this.clazz = clazz;
-        methodName = "get" + partitionFieldName.substring(0, 1).toUpperCase() + partitionFieldName.substring(1);
-        /*try {
-            this.clazz = clazz;
-            Field field = clazz.getDeclaredField(partitionFieldName);
-            partitionFieldMethod = BeanUtil.methodOfGet(clazz, field);
-        } catch (NoSuchFieldException e) {
-            throw new RuntimeException("Can't found field: " + partitionFieldName);
-        } catch (NoSuchMethodException e) {
-            throw new RuntimeException("Can't found getMethod: " + partitionFieldName);
-        }*/
-    }
-
-    @Override
-    public void open(Configuration config) {
-        if (sinkBuffer == null) {
-            synchronized (DUMMY_LOCK) {
-                if (sinkBuffer == null) {
-                    Map<String, String> params = getRuntimeContext()
-                            .getExecutionConfig()
-                            .getGlobalJobParameters()
-                            .toMap();
-
-                    Account account = new AliyunAccount("LTAI5tFuLw65UsH3tqru2K1h", "p1F8my4ovgcEfs3HVORdmeLlLUUKRp");
-                    Odps odps = new Odps(account);
-                    odps.getRestClient().setRetryLogger(new MaxComputeLog());
-                    odps.setEndpoint("http://service.cn-hangzhou.maxcompute.aliyun.com/api");
-                    odps.setDefaultProject("ZxData_dev");
-                    TableTunnel tunnel = new TableTunnel(odps);
-                    tunnel.setEndpoint("http://dt.cn-hangzhou.maxcompute.aliyun.com");
-                    sinkBuffer = new MaxComputeSinkBuffer<>(360000L, 64000L, 6, clazz, tunnel, "ZxData_dev", "ad_data_of_minute_ods");
-                }
-            }
-        }
-    }
-
-    /**
-     * 将值写入到 Sink。每个值都会调用此函数
-     * @param value
-     * @param context
-     * @throws Exception
-     */
-    @Override
-    public void invoke(IN value, Context context) throws Exception {
-        Object obj = clazz.getMethod(methodName).invoke(value);
-        if (obj == null) {
-            throw new RuntimeException("The value of the partition field cannot be null!!!");
-        }
-        sinkBuffer.put(obj.toString(), value);
-    }
-
-    @Override
-    public void close() throws Exception {
-        if (sinkBuffer != null) {
-            synchronized (DUMMY_LOCK) {
-                sinkBuffer.close();
-            }
-        }
-
-        super.close();
-    }
-}

+ 0 - 115
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/MaxComputeSinkBuffer.java

@@ -1,115 +0,0 @@
-package flink.zanxiangnet.ad.monitoring.maxcompute.sink;
-
-import com.aliyun.odps.PartitionSpec;
-import com.aliyun.odps.tunnel.TableTunnel;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import flink.zanxiangnet.ad.monitoring.maxcompute.MaxComputeWriter;
-import flink.zanxiangnet.ad.monitoring.util.JsonUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.util.CollectionUtils;
-
-import java.util.*;
-import java.util.concurrent.*;
-
-/**
- * 缓存要写入 MaxCompute的数据
- */
-public class MaxComputeSinkBuffer<T> {
-    private static final Logger log = LoggerFactory.getLogger(MaxComputeSinkBuffer.class);
-
-    private final TableTunnel tunnel;
-    private final String projectName;
-    private final String tableName;
-    private final long buffRefreshTime;
-    private final long maxBuffer;
-    private final ExecutorService threadPool;
-    private final Map<String, List<T>> partitionData;
-    private final Class<T> clazz;
-
-    private Thread coreThread;
-
-    public MaxComputeSinkBuffer(
-            long buffRefreshTime,
-            long maxBuffer,
-            int partitionCount,
-            Class<T> clazz,
-            TableTunnel tunnel,
-            String projectName,
-            String tableName
-    ) {
-        this.clazz = clazz;
-        this.tunnel = tunnel;
-        this.projectName = projectName;
-        this.tableName = tableName;
-        this.buffRefreshTime = buffRefreshTime;
-        this.maxBuffer = maxBuffer;
-        partitionData = new ConcurrentHashMap<>(partitionCount);
-
-        threadPool = new ThreadPoolExecutor(
-                partitionCount,
-                partitionCount,
-                0L,
-                TimeUnit.MILLISECONDS,
-                new LinkedBlockingQueue<>(),
-                new ThreadFactoryBuilder()
-                        .setNameFormat("maxcompute-writer-%d").build());
-        start();
-    }
-
-    /**
-     * 写入数据
-     * @param partition 数据分区
-     * @param t 数据
-     */
-    public void put(String partition, T t) {
-        List<T> list = partitionData.computeIfAbsent(partition, key -> new ArrayList<>());
-        list.add(t);
-    }
-
-    public void close() {
-        coreThread.interrupt();
-    }
-
-    private void start() {
-        coreThread = new Thread(() -> {
-            Map<String, Long> lastSaveTime = new HashMap<>();
-            // 开启一个定时轮询缓存数据的线程
-            while (true) {
-                try {
-                    if (CollectionUtils.isEmpty(partitionData)) {
-                        // 没有数据,等 5s
-                        Thread.sleep(5000L);
-                        continue;
-                    }
-                    long now = System.currentTimeMillis();
-                    List<String> saveKeys = new ArrayList<>(partitionData.size());
-                    for (Map.Entry<String, List<T>> entry : partitionData.entrySet()) {
-                        Long lastSaveTimeOfKey = lastSaveTime.computeIfAbsent(entry.getKey(), key -> now);
-                        boolean save = now - lastSaveTimeOfKey > buffRefreshTime;
-                        if (save || entry.getValue().size() > maxBuffer) {
-                            saveKeys.add(entry.getKey());
-                        }
-                    }
-                    if (CollectionUtils.isEmpty(saveKeys)) {
-                        Thread.sleep(5000L);
-                        continue;
-                    } else {
-                        System.out.println("===>实际写入数据2:" + JsonUtil.toString(saveKeys));
-                        for (int i = 0; i < saveKeys.size(); i++) {
-                            String key = saveKeys.get(i);
-                            List<T> datas = partitionData.remove(key);
-                            System.out.println("===>实际写入数据3:" + datas.size());
-                            lastSaveTime.remove(key);
-                            TableTunnel.UploadSession uploadSession = tunnel.createUploadSession(projectName, tableName, new PartitionSpec(key));
-                            threadPool.submit(new MaxComputeWriter<>(clazz, uploadSession, uploadSession.openRecordWriter(i), datas));
-                        }
-                    }
-                } catch (Exception e) {
-                    log.error(e.getMessage(), e);
-                }
-            }
-        });
-        coreThread.start();
-    }
-}

+ 97 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/TunnelBatchSink.java

@@ -0,0 +1,97 @@
+package flink.zanxiangnet.ad.monitoring.maxcompute.sink;
+
+import com.aliyun.odps.Odps;
+import com.aliyun.odps.Table;
+import com.aliyun.odps.account.Account;
+import com.aliyun.odps.account.AliyunAccount;
+import com.aliyun.odps.tunnel.TableTunnel;
+import flink.zanxiangnet.ad.monitoring.maxcompute.MaxComputeLog;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.BeanUtil;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation.MaxComputeTable;
+import flink.zanxiangnet.ad.monitoring.pojo.properties.ApplicationProperties;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Flink sink that writes records to MaxCompute via the Tunnel batch-upload
+ * API. Records are buffered per partition in a {@link TunnelBatchSinkBuffer}
+ * and flushed asynchronously. The target table name is read from the
+ * {@link MaxComputeTable} annotation on {@code IN}; connection settings come
+ * from the job's global parameters ({@link ApplicationProperties} keys).
+ */
+public class TunnelBatchSink<IN> extends RichSinkFunction<IN> {
+    private static final Logger log = LoggerFactory.getLogger(TunnelBatchSink.class);
+
+    // Object lock to keep the MaxCompute Tunnel objects from being initialized twice.
+    // NOTE(review): the lock is static (JVM-wide) while sinkBuffer is per-instance,
+    // so open() of all subtasks in one TaskManager serializes on it — confirm intended.
+    private static final Object DUMMY_LOCK = new Object();
+
+    private final Class<IN> clazz;
+    // Interval (ms) after which a partition's buffer is flushed.
+    private final Long bufferRefreshTime;
+    // Max number of buffered records per partition before a forced flush.
+    private final Long maxBufferCount;
+    // Expected number of partitions that may exist at the same time.
+    private final Integer partitionCount;
+
+    private volatile transient TunnelBatchSinkBuffer<IN> sinkBuffer;
+
+    public TunnelBatchSink(Class<IN> clazz, Long bufferRefreshTime, Long maxBufferCount, Integer partitionCount) {
+        this.clazz = clazz;
+        this.bufferRefreshTime = bufferRefreshTime;
+        this.maxBufferCount = maxBufferCount;
+        this.partitionCount = partitionCount;
+    }
+
+    /**
+     * Lazily builds the Odps/TableTunnel clients and the per-partition buffer.
+     * Double-checked locking on the (volatile) sinkBuffer field.
+     */
+    @Override
+    public void open(Configuration config) {
+        if (sinkBuffer == null) {
+            synchronized (DUMMY_LOCK) {
+                if (sinkBuffer == null) {
+                    Map<String, String> params = getRuntimeContext()
+                            .getExecutionConfig()
+                            .getGlobalJobParameters()
+                            .toMap();
+                    MaxComputeTable tableAnnotation = clazz.getAnnotation(MaxComputeTable.class);
+
+                    Account account = new AliyunAccount(params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_ID),
+                            params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_KEY));
+                    Odps odps = new Odps(account);
+                    odps.getRestClient().setRetryLogger(new MaxComputeLog());
+                    odps.setEndpoint(params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_ENDPOINT));
+                    odps.setDefaultProject(params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_PROJECT_NAME));
+                    TableTunnel tunnel = new TableTunnel(odps);
+                    tunnel.setEndpoint(params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_TUNNEL_ENDPOINT));
+                    Table table = odps.tables().get(tableAnnotation.value());
+                    List<BeanUtil.FieldInfo> fieldInfoList = BeanUtil.parseBeanField(clazz);
+                    sinkBuffer = new TunnelBatchSinkBuffer<>(bufferRefreshTime,
+                            maxBufferCount,
+                            partitionCount,
+                            tunnel,
+                            params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_PROJECT_NAME),
+                            table,
+                            fieldInfoList);
+                }
+            }
+        }
+    }
+
+    /**
+     * Writes one record into the sink; called once per element.
+     * Only enqueues into the buffer — the actual upload happens asynchronously.
+     *
+     * @param value   the record to buffer
+     * @param context Flink sink context (unused)
+     */
+    @Override
+    public void invoke(IN value, Context context) {
+        sinkBuffer.put(value);
+    }
+
+    /**
+     * Shuts down the buffer.
+     * NOTE(review): whether records still buffered at shutdown are flushed
+     * depends on TunnelBatchSinkBuffer.close() — verify no data is dropped.
+     */
+    @Override
+    public void close() throws Exception {
+        if (sinkBuffer != null) {
+            synchronized (DUMMY_LOCK) {
+                sinkBuffer.close();
+            }
+        }
+
+        super.close();
+    }
+}

+ 156 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/TunnelBatchSinkBuffer.java

@@ -0,0 +1,156 @@
+package flink.zanxiangnet.ad.monitoring.maxcompute.sink;
+
+import com.aliyun.odps.PartitionSpec;
+import com.aliyun.odps.Table;
+import com.aliyun.odps.tunnel.TableTunnel;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.BeanUtil;
+import flink.zanxiangnet.ad.monitoring.util.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.CollectionUtils;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.stream.Collectors;
+
+/**
+ * 缓存要写入 MaxCompute的数据
+ */
+public class TunnelBatchSinkBuffer<T> {
+    private static final Logger log = LoggerFactory.getLogger(TunnelBatchSinkBuffer.class);
+
+    private static final String PART = "PART";
+    private static final String NO_PART = "NO_PART";
+
+    private final TableTunnel tunnel;
+    private final String projectName;
+    private final Table tableInfo;
+
+    private final Long buffRefreshTime;
+    private final Long maxBuffer;
+
+    private final ConcurrentHashMap<String, List<T>> partitionData;
+    private final Map<String, Method> partitionFieldMethods;
+    private final List<BeanUtil.FieldInfo> fieldInfoList;
+
+    private boolean isRun = false;
+    private Thread coreThread;
+    private final ExecutorService threadPool;
+
+    public TunnelBatchSinkBuffer(
+            Long buffRefreshTime,
+            Long maxBuffer,
+            Integer partitionCount,
+            TableTunnel tunnel,
+            String projectName,
+            Table tableInfo,
+            List<BeanUtil.FieldInfo> fieldInfoList
+    ) {
+        this.tunnel = tunnel;
+        this.projectName = projectName;
+        this.tableInfo = tableInfo;
+        this.buffRefreshTime = buffRefreshTime;
+        this.maxBuffer = maxBuffer;
+        this.fieldInfoList = fieldInfoList;
+        this.partitionData = new ConcurrentHashMap<>(partitionCount);
+
+        this.partitionFieldMethods = fieldInfoList.stream().filter(BeanUtil.FieldInfo::isUsePartitioned).collect(Collectors.toMap(BeanUtil.FieldInfo::getColumnName, BeanUtil.FieldInfo::getGetMethod));
+        threadPool = new ThreadPoolExecutor(
+                partitionCount,
+                partitionCount,
+                0L,
+                TimeUnit.MILLISECONDS,
+                new LinkedBlockingQueue<>(),
+                new ThreadFactoryBuilder()
+                        .setNameFormat("maxcompute-writer-%d").build());
+        start();
+    }
+
+    /**
+     * 写入数据
+     *
+     * @param t 数据
+     */
+    public void put(T t) {
+        StringBuilder partition;
+        if (CollectionUtils.isEmpty(partitionFieldMethods)) {
+            partition = new StringBuilder(NO_PART);
+        } else {
+            partition = new StringBuilder(PART);
+            for (Map.Entry<String, Method> entry : partitionFieldMethods.entrySet()) {
+                partition.append(entry.getKey()).append("=");
+                try {
+                    partition.append(entry.getValue().invoke(t));
+                } catch (InvocationTargetException | IllegalAccessException e) {
+                    // 获取分区字段的值失败
+                    log.error(e.getMessage(), e);
+                    throw new RuntimeException("Failed get partition field value!");
+                }
+                partition.append(",");
+            }
+            partition = new StringBuilder(partition.substring(0, partition.length() - 1));
+        }
+        List<T> list = partitionData.computeIfAbsent(partition.toString(), key -> new ArrayList<>());
+        list.add(t);
+    }
+
+    public void close() {
+        isRun = false;
+        threadPool.shutdown();
+        coreThread.interrupt();
+    }
+
+    private void start() {
+        coreThread = new Thread(() -> {
+            Map<String, Long> lastSaveTime = new HashMap<>();
+            // 开启一个定时轮询缓存数据的线程
+            while (isRun) {
+                try {
+                    if (CollectionUtils.isEmpty(partitionData)) {
+                        // 没有数据,等 5s
+                        try {
+                            Thread.sleep(5000L);
+                        } catch (InterruptedException e) {
+                            Thread.sleep(5000L);
+                        }
+                        continue;
+                    }
+                    long now = System.currentTimeMillis();
+                    List<String> partitionKeys = new ArrayList<>(partitionData.size());
+                    for (Map.Entry<String, List<T>> entry : partitionData.entrySet()) {
+                        Long lastSaveTimeOfKey = lastSaveTime.computeIfAbsent(entry.getKey(), key -> now);
+                        boolean mustSave = now - lastSaveTimeOfKey > buffRefreshTime;
+                        if (mustSave || entry.getValue().size() > maxBuffer) {
+                            partitionKeys.add(entry.getKey());
+                        }
+                    }
+                    if (CollectionUtils.isEmpty(partitionKeys)) {
+                        Thread.sleep(5000L);
+                        continue;
+                    }
+
+                    for (String partitionKey : partitionKeys) {
+                        List<T> dataList = partitionData.remove(partitionKey);
+                        lastSaveTime.remove(partitionKey);
+
+                        PartitionSpec part = null;
+                        if (partitionKey.startsWith(PART)) {
+                            part = new PartitionSpec(partitionKey.substring(PART.length()));
+                            // 尽然不会自己建分区!!!
+                            // 创建单个分区大概 1~5秒(大部分在 2、3秒)
+                            tableInfo.createPartition(part, true);
+                        }
+                        threadPool.submit(new TunnelBatchWriter<>(tunnel, projectName, tableInfo.getName(), part, dataList, fieldInfoList));
+                    }
+                } catch (Exception e) {
+                    log.error(e.getMessage(), e);
+                }
+            }
+        });
+        isRun = true;
+        coreThread.start();
+    }
+}

+ 92 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/TunnelBatchWriter.java

@@ -0,0 +1,92 @@
+package flink.zanxiangnet.ad.monitoring.maxcompute.sink;
+
+import com.aliyun.odps.PartitionSpec;
+import com.aliyun.odps.data.Record;
+import com.aliyun.odps.data.RecordWriter;
+import com.aliyun.odps.tunnel.TableTunnel;
+import com.aliyun.odps.tunnel.TunnelException;
+import flink.zanxiangnet.ad.monitoring.util.JsonUtil;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.BeanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.CollectionUtils;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.util.List;
+
+/**
+ * Performs the actual batch upload of one partition's records to MaxCompute
+ * via a Tunnel upload session. Retries the whole upload up to 3 times.
+ *
+ * Fixes over the previous revision: every failed attempt is logged (not only
+ * the last one), the RecordWriter is closed even if writing throws, and the
+ * final RuntimeException preserves the original cause.
+ */
+public class TunnelBatchWriter<T> implements Runnable {
+    private static final Logger log = LoggerFactory.getLogger(TunnelBatchWriter.class);
+
+    private static final int MAX_RETRY = 3;
+
+    private final TableTunnel tunnel;
+    private final String projectName;
+    private final String tableName;
+    // Partition to upload into; null for an unpartitioned table.
+    private final PartitionSpec part;
+    private final List<T> dataList;
+    private final List<BeanUtil.FieldInfo> fieldInfoList;
+
+    public TunnelBatchWriter(TableTunnel tunnel,
+                             String projectName,
+                             String tableName,
+                             PartitionSpec part,
+                             List<T> dataList,
+                             List<BeanUtil.FieldInfo> fieldInfoList
+    ) {
+        this.tunnel = tunnel;
+        this.projectName = projectName;
+        this.tableName = tableName;
+        this.part = part;
+        this.dataList = dataList;
+        this.fieldInfoList = fieldInfoList;
+    }
+
+    @Override
+    public void run() {
+        if (CollectionUtils.isEmpty(dataList)) {
+            return;
+        }
+        int retry = 0;
+        while (true) {
+            try {
+                if (retry > 0) {
+                    log.warn("Retrying MaxCompute upload, attempt: {}", retry);
+                }
+                // Creating a session takes roughly 200+ ms.
+                TableTunnel.UploadSession uploadSession = part == null
+                        ? tunnel.createUploadSession(projectName, tableName)
+                        : tunnel.createUploadSession(projectName, tableName, part);
+                long blockId = 0;
+                RecordWriter writer = uploadSession.openRecordWriter(blockId);
+                try {
+                    for (T t : dataList) {
+                        Record record = uploadSession.newRecord();
+                        for (BeanUtil.FieldInfo fieldInfo : fieldInfoList) {
+                            if (fieldInfo.isUsePartitioned()) {
+                                // Partition columns travel in the PartitionSpec, not the record.
+                                continue;
+                            }
+                            Object obj = fieldInfo.getGetMethod().invoke(t);
+                            record.set(fieldInfo.getColumnName(), obj);
+                        }
+                        writer.write(record);
+                    }
+                } finally {
+                    // Close even when write() throws, so the connection is released.
+                    writer.close();
+                }
+                uploadSession.commit(new Long[]{blockId});
+                return;
+            } catch (IOException | InvocationTargetException | IllegalAccessException | TunnelException e) {
+                log.error("Upload attempt {} failed: {}", retry + 1, e.getMessage(), e);
+                if (++retry >= MAX_RETRY) {
+                    // Preserve the cause so the failure is diagnosable upstream.
+                    throw new RuntimeException("Failed writer to MaxCompute!!! Error msg: " + e.getMessage(), e);
+                }
+            } catch (Exception e) {
+                log.error("Upload attempt {} failed: {}", retry + 1, e.getMessage(), e);
+                if (++retry >= MAX_RETRY) {
+                    throw new RuntimeException("Failed writer to MaxCompute!!! Error msg: " + e.getMessage(), e);
+                }
+            }
+        }
+    }
+}

+ 271 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/maxcompute/sink/TunnelStreamSink.java

@@ -0,0 +1,271 @@
+package flink.zanxiangnet.ad.monitoring.maxcompute.sink;
+
+import com.aliyun.odps.Odps;
+import com.aliyun.odps.PartitionSpec;
+import com.aliyun.odps.account.Account;
+import com.aliyun.odps.account.AliyunAccount;
+import com.aliyun.odps.data.Record;
+import com.aliyun.odps.tunnel.TableTunnel;
+import com.aliyun.odps.tunnel.TunnelException;
+import flink.zanxiangnet.ad.monitoring.maxcompute.MaxComputeLog;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.BeanUtil;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation.MaxComputeTable;
+import flink.zanxiangnet.ad.monitoring.pojo.properties.ApplicationProperties;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.util.CollectionUtils;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+
+/**
+ * 该类有严重 bug,请勿使用
+ *
+ * @param <IN>
+ */
+@Deprecated
+public class TunnelStreamSink<IN> extends RichSinkFunction<IN> {
+    private static final Logger log = LoggerFactory.getLogger(TunnelStreamSink.class);
+
+    // 没有分区表的 key
+    private static final String DEFAULT_PART = "DEFAULT_PART";
+    // uploadSession的生命周期(2小时)
+    private static final Long SESSION_TIMEOUT = 2 * 60 * 60 * 1000L;
+
+    // 对象锁,防止MaxCompute的 Tunnel对象多次初始化
+    private static final Object DUMMY_LOCK = new Object();
+
+    private final Class<IN> clazz;
+    private String projectName;
+    private String tableName;
+
+    // 缓存刷新的间隔时间
+    private final Long bufferRefreshTime;
+    // 缓存的最大数据量
+    private final Long maxBufferCount;
+    // 可能同时存在的分区数
+    private final Integer partitionCount;
+
+    private volatile transient TableTunnel tunnel;
+    private volatile transient List<BeanUtil.FieldInfo> fieldInfoList;
+    private volatile transient Map<String, Method> partitionFieldMethods;
+
+    private volatile transient ConcurrentHashMap<String, UploadPartition> partitionUploadInfo;
+
+    private transient Thread coreThread;
+    private volatile boolean isRun = false;
+
+    public TunnelStreamSink(Class<IN> clazz, Long bufferRefreshTime, Long maxBufferCount, Integer partitionCount) {
+        this.clazz = clazz;
+        this.bufferRefreshTime = Math.max(bufferRefreshTime, 60 * 1000);
+        this.maxBufferCount = Math.max(maxBufferCount, 1000);
+        this.partitionCount = Math.max(partitionCount, 1);
+    }
+
+    @Override
+    public void open(Configuration config) {
+        if (tunnel == null) {
+            synchronized (DUMMY_LOCK) {
+                if (tunnel == null) {
+                    long start = System.currentTimeMillis();
+                    Map<String, String> params = getRuntimeContext()
+                            .getExecutionConfig()
+                            .getGlobalJobParameters()
+                            .toMap();
+                    MaxComputeTable tableAnnotation = clazz.getAnnotation(MaxComputeTable.class);
+
+                    Account account = new AliyunAccount(params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_ID),
+                            params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_KEY));
+                    Odps odps = new Odps(account);
+                    odps.getRestClient().setRetryLogger(new MaxComputeLog());
+                    odps.setEndpoint(params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_ENDPOINT));
+                    odps.setDefaultProject(params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_PROJECT_NAME));
+                    tunnel = new TableTunnel(odps);
+                    tunnel.setEndpoint(params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_TUNNEL_ENDPOINT));
+                    projectName = params.get(ApplicationProperties.MAX_COMPUTE_ACCOUNT_PROJECT_NAME);
+                    tableName = tableAnnotation.value();
+                    fieldInfoList = BeanUtil.parseBeanField(clazz);
+                    partitionFieldMethods = fieldInfoList.stream().filter(BeanUtil.FieldInfo::isUsePartitioned).collect(Collectors.toMap(BeanUtil.FieldInfo::getColumnName, BeanUtil.FieldInfo::getGetMethod));
+                    partitionUploadInfo = new ConcurrentHashMap<>(partitionCount);
+                    // Table table = odps.tables().get(tableAnnotation.value());
+                    initThread();
+                }
+            }
+        }
+    }
+
+    /**
+     * 定时清理刷新缓存中的数据
+     */
+    private void initThread() {
+        // 定时清理数据
+        coreThread = new Thread(() -> {
+            while (isRun) {
+                try {
+                    if (partitionUploadInfo != null && !partitionUploadInfo.isEmpty()) {
+                        long now = System.currentTimeMillis();
+                        for (Map.Entry<String, UploadPartition> entry : partitionUploadInfo.entrySet()) {
+                            if (now - entry.getValue().getLastFlushTime() >= bufferRefreshTime) {
+                                synchronized (DUMMY_LOCK) {
+                                    if (now - entry.getValue().getLastFlushTime() >= bufferRefreshTime) {
+                                        if (entry.getValue().getPack().getRecordCount() > 0) {
+                                            int retry = 0;
+                                            do {
+                                                try {
+                                                    // 大概用时 100ms ~ 3s
+                                                    entry.getValue().getPack().flush();
+                                                    break;
+                                                } catch (IOException e) {
+                                                    // flush失败,老的 pack就不可用了
+                                                    entry.getValue().setPack(entry.getValue().getUploadSession().newRecordPack());
+                                                    if (retry == 3) {
+                                                        log.error("Flush data error!");
+                                                        throw e;
+                                                    }
+                                                }
+                                            } while (retry++ < 3);
+                                        }
+                                        entry.getValue().setLastFlushTime(now);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                    try {
+                        // 等 6s
+                        Thread.sleep(6000);
+                    } catch (InterruptedException e) {
+                        // 等 6s
+                        Thread.sleep(6000);
+                    }
+                } catch (Exception e) {
+                    log.error(e.getMessage(), e);
+                }
+            }
+        });
+        isRun = true;
+        coreThread.start();
+    }
+
+    /**
+     * 将值写入到 Sink。每个值都会调用此函数
+     *
+     * @param value
+     * @param context
+     */
+    @Override
+    public void invoke(IN value, Context context) throws TunnelException, IOException, InvocationTargetException, IllegalAccessException {
+        String partitionStr = generatePartitionStr(value);
+        UploadPartition uploadPartition = partitionUploadInfo.get(partitionStr);
+        if (uploadPartition == null || sessionTimeout(uploadPartition)) {
+            synchronized (DUMMY_LOCK) {
+                uploadPartition = partitionUploadInfo.get(partitionStr);
+                if (uploadPartition == null || sessionTimeout(uploadPartition)) {
+                    long start = System.currentTimeMillis();
+                    PartitionSpec partitionSpec = partitionStr.equals(DEFAULT_PART) ? null : new PartitionSpec(partitionStr);
+                    uploadPartition = UploadPartition.builder()
+                            .partition(partitionStr)
+                            .uploadSession(tunnel.createStreamUploadSession(projectName, tableName, partitionSpec, true))
+                            .partitionSpec(partitionSpec)
+                            .createTime(System.currentTimeMillis())
+                            .build();
+                    uploadPartition.setPack(uploadPartition.getUploadSession().newRecordPack());
+                    partitionUploadInfo.put(partitionStr, uploadPartition);
+                }
+            }
+        }
+        Record record = uploadPartition.getUploadSession().newRecord();
+        for (BeanUtil.FieldInfo fieldInfo : fieldInfoList) {
+            if (fieldInfo.isUsePartitioned()) {
+                // 分区字段不在这里设值
+                continue;
+            }
+            Object obj = fieldInfo.getGetMethod().invoke(value);
+            record.set(fieldInfo.getColumnName(), obj);
+        }
+        // append只是写入内存
+        uploadPartition.getPack().append(record);
+
+        flush(uploadPartition);
+    }
+
+    /**
+     * Shuts the sink down: stops the periodic-flush thread and pushes any
+     * records still buffered in memory to the tunnel service.
+     *
+     * @throws Exception if the final flush or the superclass close fails
+     */
+    @Override
+    public void close() throws Exception {
+        // Stop the timer thread first so it cannot race with the final flush below.
+        isRun = false;
+        coreThread.interrupt();
+        // Flush remaining buffered records; without this, everything appended since
+        // the last flush would be silently lost on shutdown.
+        if (partitionUploadInfo != null) {
+            synchronized (DUMMY_LOCK) {
+                for (Map.Entry<String, UploadPartition> entry : partitionUploadInfo.entrySet()) {
+                    if (entry.getValue().getPack().getRecordCount() > 0) {
+                        entry.getValue().getPack().flush();
+                    }
+                }
+            }
+        }
+        super.close();
+    }
+
+    /**
+     * Flushes the partition's record pack to the tunnel service once the
+     * buffered data reaches the configured threshold, retrying up to three
+     * additional times on I/O failure before rethrowing.
+     *
+     * @param uploadPartition the partition whose pack may need flushing
+     * @throws IOException if every flush attempt fails
+     */
+    private void flush(UploadPartition uploadPartition) throws IOException {
+        // NOTE(review): getDataSize() is a byte size while the field is named
+        // maxBufferCount — confirm the threshold is really meant in bytes.
+        if (uploadPartition.getPack().getDataSize() >= maxBufferCount) {
+            synchronized (DUMMY_LOCK) {
+                if (uploadPartition.getPack().getDataSize() >= maxBufferCount) {
+                    int retry = 0;
+                    do {
+                        try {
+                            // Typically takes ~100ms – 3s
+                            uploadPartition.getPack().flush();
+                            uploadPartition.setLastFlushTime(System.currentTimeMillis());
+                            break;
+                        } catch (IOException e) {
+                            // A failed flush invalidates the old pack; start a fresh one.
+                            // NOTE(review): records buffered in the failed pack are dropped
+                            // here — the retries below flush the (empty) replacement pack.
+                            uploadPartition.setPack(uploadPartition.getUploadSession().newRecordPack());
+                            if (retry == 3) {
+                                // Log with the cause attached instead of a bare message.
+                                log.error("Flush data error!", e);
+                                throw e;
+                            }
+                        }
+                    } while (retry++ < 3);
+                }
+            }
+        }
+    }
+
+    /**
+     * Whether the partition's upload session has outlived {@code SESSION_TIMEOUT}
+     * and must be re-created before any further writes.
+     */
+    private boolean sessionTimeout(UploadPartition uploadPartition) {
+        long ageMillis = System.currentTimeMillis() - uploadPartition.getCreateTime();
+        return ageMillis > SESSION_TIMEOUT;
+    }
+
+    /**
+     * Builds the partition spec string ("k1=v1,k2=v2") for an element by invoking
+     * the configured partition-field getters, or {@code DEFAULT_PART} when the
+     * table is unpartitioned.
+     *
+     * @param t the element whose partition values are read reflectively
+     * @return the partition spec string, never null
+     * @throws RuntimeException (with the reflective failure as cause) if a
+     *         partition-field getter cannot be invoked
+     */
+    private String generatePartitionStr(IN t) {
+        if (CollectionUtils.isEmpty(partitionFieldMethods)) {
+            return DEFAULT_PART;
+        }
+        StringBuilder partition = new StringBuilder();
+        for (Map.Entry<String, Method> entry : partitionFieldMethods.entrySet()) {
+            try {
+                partition.append(entry.getKey()).append("=").append(entry.getValue().invoke(t)).append(",");
+            } catch (InvocationTargetException | IllegalAccessException e) {
+                // Failed to read the partition field value
+                log.error(e.getMessage(), e);
+                // Chain the cause so the reflective failure is not lost.
+                throw new RuntimeException("Failed get partition field value!", e);
+            }
+        }
+        // Drop the trailing separator in place instead of rebuilding the builder.
+        partition.setLength(partition.length() - 1);
+        return partition.toString();
+    }
+
+    /**
+     * Per-partition upload state: the MaxCompute stream-upload session, the
+     * in-memory record pack being filled, and the timestamps used to decide
+     * when to re-create the session and when to flush.
+     */
+    @Data
+    @NoArgsConstructor
+    @AllArgsConstructor
+    @Builder
+    private static class UploadPartition {
+        // Partition spec rendered as "k1=v1,k2=v2" (or DEFAULT_PART when unpartitioned)
+        private String partition;
+        // Session creation time; compared against SESSION_TIMEOUT
+        private Long createTime;
+        // Parsed spec; null for the default (unpartitioned) case
+        private PartitionSpec partitionSpec;
+        private TableTunnel.StreamUploadSession uploadSession;
+        // In-memory buffer of appended records, flushed by size or by the timer thread
+        private TableTunnel.StreamRecordPack pack;
+        // Lombok @Builder ignores plain field initializers: without @Builder.Default
+        // builder-built instances would get lastFlushTime == 0 and the timer thread
+        // would flush them on its very first cycle.
+        @Builder.Default
+        private long lastFlushTime = System.currentTimeMillis();
+    }
+}

+ 42 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/dto/AdDataOfDayDTO.java

@@ -0,0 +1,42 @@
+package flink.zanxiangnet.ad.monitoring.pojo.dto;
+
+import com.tencent.ads.model.DailyReportsGetListStruct;
+import lombok.Data;
+
+import java.time.LocalDate;
+
+/**
+ * 按天拉取的广告数据
+ */
+@Data
+public class AdDataOfDayDTO {
+    /**
+     * 创建时间
+     */
+    private Long createTime;
+
+    /**
+     * 从腾讯获取数据的起始时间
+     */
+    private LocalDate startDate;
+
+    /**
+     * 从腾讯获取数据的结束时间
+     */
+    private LocalDate endDate;
+
+    /**
+     * 账号
+     */
+    private String channel;
+
+    /**
+     * 账号
+     */
+    private Long accountId;
+
+    /**
+     * 消耗数据文本
+     */
+    private DailyReportsGetListStruct dailyReportsGetListStruct;
+}

+ 6 - 1
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/dto/AdDataOfMinuteDTO.java

@@ -10,7 +10,12 @@ import lombok.Data;
 public class AdDataOfMinuteDTO {
 
     /**
-     * 创建时间
+     * 拉取的时间
+     */
+    private Long dataTime;
+
+    /**
+     * 数据创建时间
      */
     private Long createTime;
 

+ 508 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/entity/AdDataOfDayODS.java

@@ -0,0 +1,508 @@
+package flink.zanxiangnet.ad.monitoring.pojo.entity;
+
+import com.google.gson.annotations.SerializedName;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation.MaxComputeColumn;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation.MaxComputeTable;
+import lombok.Data;
+
+import java.io.Serializable;
+import java.util.Date;
+
+@Data
+@MaxComputeTable("ad_data_of_day_ods")
+public class AdDataOfDayODS implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * 统计日 yyyy-MM-dd(用于 MaxCompute分区)
+     */
+    @MaxComputeColumn(isPartitioned = true)
+    @SerializedName("stat_day")
+    private String statDay;
+
+    /**
+     * 应用下的账号 id
+     */
+    @SerializedName("account_id")
+    private Long accountId;
+
+    /**
+     * 计划 id
+     */
+    @SerializedName("campaign_id")
+    private Long campaignId;
+
+    /**
+     * 服务商账号 id
+     */
+    @SerializedName("agency_account_id")
+    private Long agencyAccountId;
+
+    /**
+     * 微信账号id
+     */
+    @SerializedName("wechat_account_id")
+    private String wechatAccountId;
+
+    /**
+     * 微信服务商id
+     */
+    @SerializedName("wechat_agency_id")
+    private String wechatAgencyId;
+
+    /**
+     * 广告组 id
+     */
+    @SerializedName("adgroup_id")
+    private Long adgroupId;
+
+    /**
+     * 广告 id
+     */
+    @SerializedName("ad_id")
+    private Long adId;
+
+    /**
+     * 记录创建时间
+     */
+    @SerializedName("create_time")
+    private Date createTime;
+
+    /**
+     * 当日成本偏差。反映广告今日的实际成本与目标成本直接的差异,注:该项成本相关数据按小时更新,与实时更新的「转化目标成本」数据存在出入属于正常情况。
+     */
+    @SerializedName("cost_deviation_rate")
+    private Double costDeviationRate;
+
+    /**
+     * 消耗
+     */
+    @SerializedName("cost")
+    private Long cost;
+
+    /**
+     * 赔付金额。智能优化成本保障政策下,广告超成本时的赔付金额。
+     */
+    @SerializedName("compensation_amount")
+    private Long compensationAmount;
+
+    /**
+     * 曝光次数。用户观看广告的次数。
+     */
+    @SerializedName("view_count")
+    private Long viewCount;
+
+    /**
+     * 千次曝光成本。平均每千次曝光的花费。
+     */
+    @SerializedName("thousand_display_price")
+    private Long thousandDisplayPrice;
+
+    /**
+     * 曝光人数。观看广告的独立用户数。
+     */
+    @SerializedName("view_user_count")
+    private Long viewUserCount;
+
+    /**
+     * 人均曝光次数。每个用户观看广告的平均次数。
+     */
+    @SerializedName("avg_view_per_user")
+    private Double avgViewPerUser;
+
+    /**
+     * 点击次数。用户在广告外层进行点击操作的次数。包括点击图片/视频,及朋友圈广告“文字链、头像、昵称、门店、选择按钮”等所有广告外层区域的点击。
+     */
+    @SerializedName("valid_click_count")
+    private Long validClickCount;
+
+    /**
+     * 点击人数。在广告外层进行点击操作的独立用户数。
+     */
+    @SerializedName("click_user_count")
+    private Long clickUserCount;
+
+    /**
+     * 点击率。看到广告后执行点击操作的百分比。计算逻辑:广告点击次数/广告曝光次数。
+     */
+    @SerializedName("ctr")
+    private Double ctr;
+
+    /**
+     * 点击均价。一次广告点击的平均花费。计算逻辑:广告花费/广告点击次数。
+     */
+    @SerializedName("cpc")
+    private Long cpc;
+
+    /**
+     * 可转化点击次数。朋友圈:可转化点击是指可能产生转化的外层点击次数。对于“公众号推广”的广告,包括外层的公众号头像、公众号昵称、详情页查看、原生推广页查看; 对于其他类型的广告,包括外层的详情页查看和原生推广页查看。公众号:可转化点击是指可能产生转化的点击次数。
+     */
+    @SerializedName("valuable_click_count")
+    private Long valuableClickCount;
+
+    /**
+     * 可转化点击率。用户看到广告后执行可转化点击操作的百分比。计算逻辑:广告可转化点击次数/广告曝光次数。
+     */
+    @SerializedName("valuable_click_rate")
+    private Double valuableClickRate;
+
+    /**
+     * 可转化点击成本。一次可转化点击的平均花费。计算逻辑:广告花费/可转化点击次数。
+     */
+    @SerializedName("valuable_click_cost")
+    private Long valuableClickCost;
+
+    /**
+     * 转化目标量。「转化目标」的具体数量,代表该广告的转化效果量级。
+     */
+    @SerializedName("conversions_count")
+    private Long conversionsCount;
+
+    /**
+     * 转化目标成本。广告产生一次转化目标的平均费用。计算逻辑:广告花费/转化目标量。
+     */
+    @SerializedName("conversions_cost")
+    private Long conversionsCost;
+
+    /**
+     * 目标转化率。朋友圈:转化目标量/可转化点击次数。公众号:转化目标量/点击次数。
+     */
+    @SerializedName("conversions_rate")
+    private Double conversionsRate;
+
+    /**
+     * 深度转化目标量-灰度中。根据您选择的深度智能优化目标,该广告对应的具体数量。部分需接入转化跟踪后可统计。
+     */
+    @SerializedName("deep_conversions_count")
+    private Long deepConversionsCount;
+
+    /**
+     * 深度转化目标成本-灰度中。根据您选择的深度智能优化目标,该广告产生一次转化的平均费用。计算逻辑:广告花费/深度转化目标量。部分需接入转化跟踪后可统计。
+     */
+    @SerializedName("deep_conversions_cost")
+    private Long deepConversionsCost;
+
+    /**
+     * 深度目标转化率-灰度中。朋友圈:深度转化目标量/可转化点击次数。公众号:深度转化目标量/点击次数。指标随深度转化功能灰度中。接入转化跟踪后可统计。
+     */
+    @SerializedName("deep_conversions_rate")
+    private Double deepConversionsRate;
+
+    /**
+     * 关键页面访问人数。点击广告原生落地页的APP跳转按钮,并到达APP内关键页面的独立用户数。接入转化跟踪后可统计。
+     */
+    @SerializedName("key_page_uv")
+    private Long keyPageUv;
+
+    /**
+     * 下单量。用户通过该广告进行商品成交(如下单提交、在线支付)的次数。接入转化跟踪后可统计。
+     */
+    @SerializedName("order_count")
+    private Long orderCount;
+
+    /**
+     * 首日新增下单量。广告推广获取的用户,点击广告当日,带来的下单次数。接入转化跟踪后可统计。
+     */
+    @SerializedName("first_day_order_count")
+    private Long firstDayOrderCount;
+
+    /**
+     * 下单成本(次数)。产生一次下单的成本。
+     */
+    @SerializedName("web_order_cost")
+    private Long webOrderCost;
+
+    /**
+     * 下单率。一次点击到下单的转化率。
+     */
+    @SerializedName("order_rate")
+    private Double orderRate;
+
+    /**
+     * 下单金额。广告带来的总订单金额(即销售额)。接入转化跟踪后可统计。
+     */
+    @SerializedName("order_amount")
+    private Long orderAmount;
+
+    /**
+     * 首日新增下单金额。广告推广获取的用户,点击广告当日,带来的总订单金额(即销售额)。接入转化跟踪后可统计。
+     */
+    @SerializedName("first_day_order_amount")
+    private Long firstDayOrderAmount;
+
+    /**
+     * 下单客单价。下单金额/下单量。接入转化跟踪后可统计。
+     */
+    @SerializedName("order_unit_price")
+    private Long orderUnitPrice;
+
+    /**
+     * 下单ROI。下单金额/广告花费。接入转化跟踪后可统计。
+     */
+    @SerializedName("order_roi")
+    private Double orderRoi;
+
+    /**
+     * 签收次数。签收从广告主处购买的商品的次数。接入转化跟踪后可统计。
+     */
+    @SerializedName("sign_in_count")
+    private Long signInCount;
+
+    /**
+     * 加收藏次数。用户将广告主商品加入收藏的次数,接入转化跟踪后可统计。
+     */
+    @SerializedName("add_wishlist_count")
+    private Long addWishlistCount;
+
+    /**
+     * 商品详情页浏览人数。浏览广告主商品详情页的独立用户数。接入转化跟踪后可统计。
+     */
+    @SerializedName("view_commodity_page_uv")
+    private Long viewCommodityPageUv;
+
+    /**
+     * 销售线索次数。广告带来的用户销售线索数量。H5推广需接入转化跟踪后可统计。
+     */
+    @SerializedName("page_reservation_count")
+    private Long pageReservationCount;
+
+    /**
+     * 首次付费人数。广告带来的有付费行为的销售线索数量按用户去重,同一个用户多次提交仅计算一次(原付费销售线索人数)。H5推广需接入转化跟踪后可统计。
+     */
+    @SerializedName("leads_purchase_uv")
+    private Long leadsPurchaseUv;
+
+    /**
+     * 首次付费成本(人数)。产生一个首次付费用户的成本(原付费销售线索成本)。H5推广需接入转化跟踪后可统计。
+     */
+    @SerializedName("leads_purchase_cost")
+    private Long leadsPurchaseCost;
+
+    /**
+     * 首次付费转化率(人数)。一次点击到产生一个首次付费用户的转化率(原付费销售线索转化率)。H5推广需接入转化跟踪后可统计。
+     */
+    @SerializedName("leads_purchase_rate")
+    private Double leadsPurchaseRate;
+
+    /**
+     * 加企业微信客服人数。添加企业微信好友成功的独立用户数。
+     */
+    @SerializedName("scan_follow_count")
+    private Long scanFollowCount;
+
+    /**
+     * 小游戏注册人数。通过广告首次登录小游戏的独立用户数。
+     */
+    @SerializedName("wechat_app_register_uv")
+    private Long wechatAppRegisterUv;
+
+    /**
+     * 小游戏注册成本(人数)。产生一个小游戏注册人数的成本。
+     */
+    @SerializedName("wechat_minigame_register_cost")
+    private Long wechatMinigameRegisterCost;
+
+    /**
+     * 小游戏注册率。一次点击到小游戏注册的转化率。
+     */
+    @SerializedName("wechat_minigame_register_rate")
+    private Double wechatMinigameRegisterRate;
+
+    /**
+     * 首日新增广告ARPU。广告带来的注册用户,在注册当日,产生的平均广告变现收入。注:该指标天更新,可以查看昨天及以前的数据。
+     */
+    @SerializedName("wechat_minigame_arpu")
+    private Double wechatMinigameArpu;
+
+    /**
+     * 小游戏次留人数。通过广告首次登录小游戏,并在第二天再次登录的独立用户数。接入转化跟踪后可统计。
+     */
+    @SerializedName("wechat_minigame_retention_count")
+    private Long wechatMinigameRetentionCount;
+
+    /**
+     * 小游戏付费次数。通过广告进入小游戏并完成付费的次数。接入转化跟踪后可统计。
+     */
+    @SerializedName("wechat_minigame_checkout_count")
+    private Long wechatMinigameCheckoutCount;
+
+    /**
+     * 小游戏付费金额。通过广告进入小游戏并完成付费的金额。接入转化跟踪后可统计。
+     */
+    @SerializedName("wechat_minigame_checkout_amount")
+    private Long wechatMinigameCheckoutAmount;
+
+    /**
+     * 公众号关注次数。用户通过广告关注公众号成功的次数。
+     */
+    @SerializedName("official_account_follow_count")
+    private Long officialAccountFollowCount;
+
+    /**
+     * 公众号关注成本(次数)。产生一次公众号关注的成本。
+     */
+    @SerializedName("official_account_follow_cost")
+    private Long officialAccountFollowCost;
+
+    /**
+     * 公众号关注率。一次点击到公众号关注的转化率。
+     */
+    @SerializedName("official_account_follow_rate")
+    private Double officialAccountFollowRate;
+
+    /**
+     * 公众号内注册人数。用户通过关注类广告关注公众号后,在公众号内部产生了广告主定义的注册行为的人数(UV)。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_register_user_count")
+    private Long officialAccountRegisterUserCount;
+
+    /**
+     * 公众号内注册比例。公众号内注册独立用户数/公众号关注次数。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_register_rate")
+    private Double officialAccountRegisterRate;
+
+    /**
+     * 公众号内注册成本。广告花费/广告产生的注册行为数量。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_register_cost")
+    private Long officialAccountRegisterCost;
+
+    /**
+     * 公众号内注册订单金额。用户通过关注类广告关注公众号后,在公众号内部产生了广告主定义的订单行为的订单金额(即销售额)。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_register_amount")
+    private Long officialAccountRegisterAmount;
+
+    /**
+     * 公众号内注册ROI。注册产生的订单金额累计/广告花费。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_register_roi")
+    private Long officialAccountRegisterRoi;
+
+    /**
+     * 公众号内填单次数。用户通过关注类广告关注公众号后,在公众号内部产生了广告主定义的填单行为的数量。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_apply_count")
+    private Long officialAccountApplyCount;
+
+    /**
+     * 公众号内填单人数。用户通过关注类广告关注公众号后,在公众号内部产生了广告主定义的填单行为的独立用户数。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_apply_user_count")
+    private Long officialAccountApplyUserCount;
+
+    /**
+     * 公众号内填单比例。公众号内填单的独立用户数/公众号关注次数。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_apply_rate")
+    private Double officialAccountApplyRate;
+
+    /**
+     * 公众号内填单成本。广告花费/广告产生的填单行为数量。接入转化跟踪后可统计(公众号接入暂未全量开放) 。
+     */
+    @SerializedName("official_account_apply_cost")
+    private Long officialAccountApplyCost;
+
+    /**
+     * 公众号内填单金额。用户通过关注类广告关注公众号后,在公众号内部产生了广告主定义的填单行为的订单金额(即销售额)。接入转化跟踪后可统计(公众号接入暂未全量开放)。
+     */
+    @SerializedName("official_account_apply_amount")
+    private Long officialAccountApplyAmount;
+
+    /**
+     * 公众号内填单ROI。填单产生的订单金额累计/广告花费。接入转化跟踪后可统计(公众号接入暂未全量开放) 。
+     */
+    @SerializedName("official_account_apply_roi")
+    private Long officialAccountApplyRoi;
+
+    /**
+     * 公众号内下单次数。用户通过关注类广告关注公众号后,在公众号内部产生了广告主定义的下单行为的数量。接入转化跟踪后可统计(公众号接入暂未全量开放) 。
+     */
+    @SerializedName("official_account_order_count")
+    private Long officialAccountOrderCount;
+
+    /**
+     * 首日公众号内下单次数。广告推广获取的用户,在关注公众号当日,在公众号内部产生了广告主定义的下单行为数量。接入转化跟踪后可统计。
+     */
+    @SerializedName("official_account_first_day_order_count")
+    private Long officialAccountFirstDayOrderCount;
+
+    /**
+     * 公众号内下单人数。用户通过关注类广告关注公众号后,在公众号内部产生了广告主定义的下单行为的独立用户数。接入转化跟踪后可统计(公众号接入暂未全量开放) 。
+     */
+    @SerializedName("official_account_order_user_count")
+    private Long officialAccountOrderUserCount;
+
+    /**
+     * 公众号内下单比例。公众号内下单独立用户数(UV)/公众号关注次数。接入转化跟踪后可统计(公众号接入暂未全量开放) 。
+     */
+    @SerializedName("official_account_order_rate")
+    private Double officialAccountOrderRate;
+
+    /**
+     * 公众号内下单成本。广告花费/广告产生的下单行为数量。接入转化跟踪后可统计(公众号接入暂未全量开放) 。
+     */
+    @SerializedName("official_account_order_cost")
+    private Long officialAccountOrderCost;
+
+    /**
+     * 公众号内下单金额。用户通过关注类广告关注公众号后,在公众号内部产生了广告主定义的下单行为的订单金额(即销售额)。接入转化跟踪后可统计(公众号接入暂未全量开放) 。
+     */
+    @SerializedName("official_account_order_amount")
+    private Long officialAccountOrderAmount;
+
+    /**
+     * 首日公众号内下单金额。广告推广获取的用户,在关注公众号当日,在公众号内部产生了广告主定义的下单行为订单金额(即销售额)。接入转化跟踪后可统计。
+     */
+    @SerializedName("official_account_first_day_order_amount")
+    private Long officialAccountFirstDayOrderAmount;
+
+    /**
+     * 公众号内下单ROI。下单产生的订单金额累计/广告花费。接入转化跟踪后可统计(公众号接入暂未全量开放) 。
+     */
+    @SerializedName("official_account_order_roi")
+    private Long officialAccountOrderRoi;
+
+    /**
+     * 公众号内发消息人数。用户关注公众号后,在公众号对话框内发送消息的独立用户数。
+     */
+    @SerializedName("official_account_consult_count")
+    private Long officialAccountConsultCount;
+
+    /**
+     * 阅读粉丝量。近3日新增的粉丝中产生阅读行为的用户数。
+     */
+    @SerializedName("official_account_reader_count")
+    private Long officialAccountReaderCount;
+
+    /**
+     * 公众号内进件人数。在公众号内完整提交贷款申请资料的独立用户数。接入转化跟踪后可统计。
+     */
+    @SerializedName("official_account_credit_apply_user_count")
+    private Long officialAccountCreditApplyUserCount;
+
+    /**
+     * 公众号内授信人数。在公众号内完整提交贷款申请资料,并通过放款方的资质审核的独立用户数。接入转化跟踪后可统计。
+     */
+    @SerializedName("official_account_credit_user_count")
+    private Long officialAccountCreditUserCount;
+
+    /**
+     * 广告分享次数。用户将广告落地页分享给好友和朋友圈的次数。
+     */
+    @SerializedName("forward_count")
+    private Long forwardCount;
+
+    /**
+     * 广告分享人数。将广告落地页分享给好友和朋友圈的独立用户数。
+     */
+    @SerializedName("forward_user_count")
+    private Long forwardUserCount;
+
+    /**
+     * 不感兴趣点击次数。用户点击“不感兴趣”的次数。
+     */
+    @SerializedName("no_interest_count")
+    private Long noInterestCount;
+}

+ 11 - 1
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/entity/AdDataOfMinuteODS.java

@@ -1,22 +1,26 @@
 package flink.zanxiangnet.ad.monitoring.pojo.entity;
 
 import com.google.gson.annotations.SerializedName;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation.MaxComputeColumn;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.DataTypeEnum;
+import flink.zanxiangnet.ad.monitoring.maxcompute.bean.annotation.MaxComputeTable;
 import lombok.Data;
 
 import java.io.Serializable;
-import java.time.LocalDate;
 
 /**
  * 原始数据
  * 每 5分钟拉取的广告原始数据
  */
 @Data
+@MaxComputeTable("ad_data_of_minute_ods")
 public class AdDataOfMinuteODS implements Serializable {
     private static final long serialVersionUID = 1L;
 
     /**
      * 统计日期(用于 MaxCompute分区)
      */
+    @MaxComputeColumn(isPartitioned = true)
     @SerializedName("stat_day")
     private String statDay;
 
@@ -290,6 +294,12 @@ public class AdDataOfMinuteODS implements Serializable {
     @SerializedName("official_account_follow_count")
     private Long officialAccountFollowCount;
 
+    /**
+     * 公众号关注成本(次数)。产生一次公众号关注的成本。
+     */
+    @SerializedName("official_account_follow_cost")
+    private Long officialAccountFollowCost;
+
     /**
      * 公众号关注率。一次点击到公众号关注的转化率。
      */

+ 9 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/properties/ApplicationProperties.java

@@ -0,0 +1,9 @@
+package flink.zanxiangnet.ad.monitoring.pojo.properties;
+
+public class ApplicationProperties {
+    public static final String MAX_COMPUTE_ACCOUNT_ID = "maxCompute.accountId";
+    public static final String MAX_COMPUTE_ACCOUNT_KEY = "maxCompute.accountKey";
+    public static final String MAX_COMPUTE_ACCOUNT_ENDPOINT = "maxCompute.endpoint";
+    public static final String MAX_COMPUTE_ACCOUNT_PROJECT_NAME = "maxCompute.projectName";
+    public static final String MAX_COMPUTE_ACCOUNT_TUNNEL_ENDPOINT = "maxCompute.tunnelEndpoint";
+}

+ 10 - 0
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/pojo/properties/KafkaProperties.java

@@ -0,0 +1,10 @@
+package flink.zanxiangnet.ad.monitoring.pojo.properties;
+
+public class KafkaProperties {
+    public static final String KAFKA_SERVERS = "kafka.servers";
+    public static final String KAFKA_USERNAME = "kafka.username";
+    public static final String KAFKA_PASSWORD = "kafka.password";
+    public static final String KAFKA_SSL_PATH = "kafka.sslPath";
+    public static final String KAFKA_TOPIC = "kafka.topic";
+    public static final String KAFKA_GROUP_ID = "kafka.groupId";
+}

+ 1 - 3
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/bean/ObjectUtil.java → flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/ObjectUtil.java

@@ -1,6 +1,4 @@
-package flink.zanxiangnet.ad.monitoring.util.bean;
-
-import flink.zanxiangnet.ad.monitoring.util.DateUtil;
+package flink.zanxiangnet.ad.monitoring.util;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;

+ 0 - 15
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/bean/DataMapping.java

@@ -1,15 +0,0 @@
-package flink.zanxiangnet.ad.monitoring.util.bean;
-
-import java.lang.annotation.*;
-
-@Target(ElementType.FIELD)
-@Retention(RetentionPolicy.RUNTIME)
-@Documented
-public @interface DataMapping {
-
-    boolean ignore() default false;
-
-    String columnName() default "";
-
-    DataTypeEnum dataType();
-}

+ 0 - 42
flink-ad-monitoring/src/main/java/flink/zanxiangnet/ad/monitoring/util/bean/FieldInfo.java

@@ -1,42 +0,0 @@
-package flink.zanxiangnet.ad.monitoring.util.bean;
-
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-/**
- * @Author wcc
- * @Date 2020/12/3 16:08
- * @Version 1.0
- * @Description excel中列的信息
- */
-@Data
-@NoArgsConstructor
-@AllArgsConstructor
-@Builder
-public class FieldInfo {
-    /**
-     * 字段名
-     */
-    private String fieldName;
-    /**
-     * 数据库中的列名
-     */
-    private String columnName;
-
-    private Class<?> fieldType;
-    /**
-     * 数据库中保存的类型
-     */
-    private DataTypeEnum columnType;
-
-    private Field field;
-
-    private Method getMethod;
-
-    private Method setMethod;
-}

+ 7 - 0
flink-ad-monitoring/src/main/resources/ad_stream_of_day.properties

@@ -0,0 +1,7 @@
+# NOTE(review): live Kafka SASL credentials are committed to version control here;
+# move them to environment variables / secret management and rotate the password.
+kafka.servers=114.55.59.94:9093,112.124.33.132:9093
+kafka.username=alikafka_pre-cn-tl32fsx4l00x
+kafka.password=VOEdhZLjOrL76lrl5bqPtydtoEkbs0Ny
+kafka.sslPath=D:\\Downloads\\kafka.client.truststore.jks
+# kafka.sslPath=/root/flink-1.13.2/kafka.client.truststore.jks
+kafka.topic=ad_day_cost_topic
+kafka.groupId=ad_day_cost_group

+ 7 - 0
flink-ad-monitoring/src/main/resources/ad_stream_of_minute.properties

@@ -0,0 +1,7 @@
+kafka.servers=114.55.59.94:9093,112.124.33.132:9093
+kafka.username=alikafka_pre-cn-tl32fsx4l00x
+kafka.password=VOEdhZLjOrL76lrl5bqPtydtoEkbs0Ny
+kafka.sslPath=D:\\Downloads\\kafka.client.truststore.jks
+# kafka.sslPath=/root/flink-1.13.2/kafka.client.truststore.jks
+kafka.topic=ad_cost_topic
+kafka.groupId=ad_cost_group

+ 5 - 0
flink-ad-monitoring/src/main/resources/application.properties

@@ -0,0 +1,5 @@
+# NOTE(review): a live Aliyun AccessKey id/secret is committed to version control here;
+# move it to environment variables / secret management and rotate the key.
+maxCompute.accountId=LTAI5tFuLw65UsH3tqru2K1h
+maxCompute.accountKey=p1F8my4ovgcEfs3HVORdmeLlLUUKRp
+maxCompute.endpoint=http://service.cn-hangzhou.maxcompute.aliyun.com/api
+maxCompute.projectName=zx_ad_monitoring
+maxCompute.tunnelEndpoint=http://dt.cn-hangzhou.maxcompute.aliyun.com