You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kylin.apache.org by xx...@apache.org on 2022/08/21 22:55:06 UTC

[kylin] 01/02: KYLIN-5221 add model, monitor, loaddata, datasource, quick start doc

This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a commit to branch doc5.0
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit 6be000a9857f1b5add79653e7fbddc6245b52b3f
Author: Mukvin <bo...@163.com>
AuthorDate: Fri Aug 19 17:58:11 2022 +0800

    KYLIN-5221 add model, monitor, loaddata, datasource, quick start doc
---
 website/docs/datasource/data_sampling.md           |  74 ++++
 website/docs/datasource/images/hive_datasource.png | Bin 0 -> 57779 bytes
 website/docs/datasource/images/sample.png          | Bin 0 -> 603 bytes
 website/docs/datasource/images/sample_data.png     | Bin 0 -> 114830 bytes
 website/docs/datasource/images/select_dataset.png  | Bin 0 -> 113355 bytes
 website/docs/datasource/images/ssb_column.png      | Bin 0 -> 80457 bytes
 website/docs/datasource/images/target_table.png    | Bin 0 -> 76861 bytes
 website/docs/datasource/import_hive.md             |  74 ++++
 website/docs/datasource/intro.md                   |  26 ++
 .../docs/deployment/on-premises/prerequisite.md    |   1 -
 website/docs/modeling/data_modeling.md             |  64 ++++
 website/docs/modeling/images/add_dimention.gif     | Bin 0 -> 305969 bytes
 .../docs/modeling/images/add_join_relations.png    | Bin 0 -> 45663 bytes
 website/docs/modeling/images/add_measure.png       | Bin 0 -> 283425 bytes
 website/docs/modeling/images/add_model_set.png     | Bin 0 -> 105678 bytes
 website/docs/modeling/images/add_tables.png        | Bin 0 -> 76148 bytes
 .../docs/modeling/images/create_join_relations.png | Bin 0 -> 345446 bytes
 website/docs/modeling/images/model_list.png        | Bin 0 -> 144135 bytes
 .../modeling/images/model_overview/dimensions.png  | Bin 0 -> 270458 bytes
 website/docs/modeling/images/model_overview/er.png | Bin 0 -> 240832 bytes
 .../modeling/images/model_overview/measures.png    | Bin 0 -> 209326 bytes
 .../images/model_overview/unfold_model.png         | Bin 0 -> 337877 bytes
 .../docs/modeling/images/modellist_more_info.png   | Bin 0 -> 330441 bytes
 website/docs/modeling/images/reduceio.png          | Bin 0 -> 155351 bytes
 .../modeling/images/responsetime_datavolume.png    | Bin 0 -> 94683 bytes
 website/docs/modeling/images/save_load_method.png  | Bin 0 -> 112102 bytes
 .../docs/modeling/images/star_model_created.png    | Bin 0 -> 90333 bytes
 .../docs/modeling/images/switch_to_fact_table.png  | Bin 0 -> 76376 bytes
 website/docs/modeling/images/time_format.png       | Bin 0 -> 655 bytes
 website/docs/modeling/load_data/build_index.md     |  47 +++
 website/docs/modeling/load_data/by_date.md         |  68 ++++
 website/docs/modeling/load_data/full_build.md      |  46 +++
 .../load_data/images/build_empty_index_en.png      | Bin 0 -> 42914 bytes
 .../modeling/load_data/images/build_summary.png    | Bin 0 -> 141411 bytes
 .../load_data/images/by_date/buildcube_0.png       | Bin 0 -> 220980 bytes
 .../load_data/images/by_date/buildcube_1.png       | Bin 0 -> 50730 bytes
 .../load_data/images/by_date/buildcube_2.png       | Bin 0 -> 153458 bytes
 .../load_data/images/by_date/buildcube_3.png       | Bin 0 -> 148443 bytes
 .../images/customize_build_save_model.png          | Bin 0 -> 72711 bytes
 .../customize_build_save_model_partition.png       | Bin 0 -> 69557 bytes
 .../docs/modeling/load_data/images/full_build.png  | Bin 0 -> 32928 bytes
 .../modeling/load_data/images/full_build_cube.png  | Bin 0 -> 36363 bytes
 .../load_data/images/full_build_segment_info.png   | Bin 0 -> 37628 bytes
 .../load_data/images/full_load/full_load.png       | Bin 0 -> 131982 bytes
 .../load_data/images/full_load/full_load_smart.png | Bin 0 -> 41844 bytes
 .../load_data/images/full_load/load_data.png       | Bin 0 -> 169454 bytes
 .../load_data/images/full_load/load_data_smart.png | Bin 0 -> 57021 bytes
 .../modeling/load_data/images/full_load/notice.png | Bin 0 -> 123106 bytes
 .../load_data/images/full_load/notice_smart.png    | Bin 0 -> 53973 bytes
 .../load_data/images/full_load/refresh.png         | Bin 0 -> 55516 bytes
 .../load_data/images/full_load/refresh_smart.png   | Bin 0 -> 51236 bytes
 .../load_data/images/load_by_date/add_segment.png  | Bin 0 -> 174978 bytes
 .../images/load_by_date/build_all_index.png        | Bin 0 -> 491221 bytes
 .../images/load_by_date/build_all_index2.png       | Bin 0 -> 197965 bytes
 .../load_data/images/load_by_date/build_index.png  | Bin 0 -> 478215 bytes
 .../images/load_by_date/build_index_by_segment.png | Bin 0 -> 178721 bytes
 .../load_data/images/load_by_date/delete_index.png | Bin 0 -> 483308 bytes
 .../modeling/load_data/images/load_by_date/job.png | Bin 0 -> 55216 bytes
 .../load_data/images/load_by_date/job_smart.png    | Bin 0 -> 55216 bytes
 .../load_data/images/load_by_date/load_data.png    | Bin 0 -> 168547 bytes
 .../load_data/images/load_by_date/load_ok.png      | Bin 0 -> 139019 bytes
 .../load_data/images/load_by_date/load_ok_2.png    | Bin 0 -> 58070 bytes
 .../images/load_by_date/load_ok_smart.png          | Bin 0 -> 50925 bytes
 .../load_data/images/load_by_date/load_twice.png   | Bin 0 -> 176020 bytes
 .../load_data/images/load_by_date/model_list_3.png | Bin 0 -> 194216 bytes
 .../load_data/images/load_by_date/notice_2.png     | Bin 0 -> 169862 bytes
 .../load_data/images/load_by_date/range_en.png     | Bin 0 -> 90045 bytes
 .../images/load_by_date/range_smart_2_en.png       | Bin 0 -> 59387 bytes
 .../images/load_by_date/range_smart_en.png         | Bin 0 -> 58139 bytes
 .../images/load_by_date/source_smart_en.png        | Bin 0 -> 157955 bytes
 .../docs/modeling/load_data/images/segment_2.png   | Bin 0 -> 146180 bytes
 .../docs/modeling/load_data/images/streaming/g.png | Bin 0 -> 132606 bytes
 .../docs/modeling/load_data/images/streaming/h.png | Bin 0 -> 84180 bytes
 .../docs/modeling/load_data/images/streaming/i.png | Bin 0 -> 69850 bytes
 .../docs/modeling/load_data/images/streaming/j.png | Bin 0 -> 115502 bytes
 .../docs/modeling/load_data/images/streaming/k.png | Bin 0 -> 121139 bytes
 .../docs/modeling/load_data/images/streaming/l.png | Bin 0 -> 138717 bytes
 .../docs/modeling/load_data/images/streaming/m.png | Bin 0 -> 202509 bytes
 .../docs/modeling/load_data/images/streaming/n.png | Bin 0 -> 117677 bytes
 .../docs/modeling/load_data/images/time_axis.png   | Bin 0 -> 31394 bytes
 .../load_data/images/time_axis_customize_build.png | Bin 0 -> 77706 bytes
 website/docs/modeling/load_data/intro.md           |  28 ++
 .../segment_operation_settings/images/segment.png  | Bin 0 -> 171751 bytes
 .../load_data/segment_operation_settings/intro.md  |  94 +++++
 .../segment_operation_settings/segment_merge.md    | 163 ++++++++
 website/docs/modeling/manual_modeling.md           | 210 +++++++++++
 website/docs/modeling/model_concepts_operations.md | 137 +++++++
 .../model_design/advance_guide/fast_bitmap.md      |  73 ++++
 .../advance_guide/images/add_index.png             | Bin 0 -> 159722 bytes
 .../advance_guide/images/low_usage.png             | Bin 0 -> 58193 bytes
 .../model_design/advance_guide/images/model.png    | Bin 0 -> 54246 bytes
 .../advance_guide/images/model_check.png           | Bin 0 -> 49547 bytes
 .../advance_guide/images/model_config_1.png        | Bin 0 -> 75469 bytes
 .../advance_guide/images/model_config_2.png        | Bin 0 -> 50629 bytes
 .../advance_guide/images/model_export.png          | Bin 0 -> 171441 bytes
 .../advance_guide/images/model_publish.png         | Bin 0 -> 57324 bytes
 .../advance_guide/images/model_upload.png          | Bin 0 -> 54934 bytes
 .../multilevel_partioning_build_subp_first.png     | Bin 0 -> 205443 bytes
 .../multilevel_partioning_build_subp_second.png    | Bin 0 -> 102751 bytes
 .../multilevel_partioning_build_subp_second_0.png  | Bin 0 -> 115055 bytes
 .../multilevel_partioning_build_subp_second_1.png  | Bin 0 -> 144275 bytes
 .../multilevel_partitioning_add_subp_value.png     | Bin 0 -> 63550 bytes
 .../images/multilevel_partitioning_close.png       | Bin 0 -> 103983 bytes
 .../images/multilevel_partitioning_model_save.png  | Bin 0 -> 141412 bytes
 .../images/multilevel_partitioning_model_set.png   | Bin 0 -> 18583 bytes
 .../images/multilevel_partitioning_query.png       | Bin 0 -> 118218 bytes
 .../images/multilevel_partitioning_set.png         | Bin 0 -> 49137 bytes
 .../images/multilevel_partitioning_subp_value.png  | Bin 0 -> 127585 bytes
 .../advance_guide/images/query_new.png             | Bin 0 -> 1609864 bytes
 .../advance_guide/images/query_old.png             | Bin 0 -> 1610676 bytes
 .../advance_guide/images/spark_plan_new.png        | Bin 0 -> 108820 bytes
 .../advance_guide/images/spark_plan_old.png        | Bin 0 -> 227357 bytes
 .../model_design/advance_guide/integer_encoding.md |  36 ++
 .../modeling/model_design/advance_guide/intro.md   |  17 +
 .../advance_guide/model_metadata_managment.md      | 133 +++++++
 .../advance_guide/multilevel_partitioning.md       | 160 ++++++++
 .../modeling/model_design/aggregation_group.md     | 416 +++++++++++++++++++++
 .../docs/modeling/model_design/computed_column.md  | 206 ++++++++++
 .../docs/modeling/model_design/data_modeling.md    |  66 ++++
 .../modeling/model_design/images/agg/AGG-1.png     | Bin 0 -> 31010 bytes
 .../modeling/model_design/images/agg/AGG-2.png     | Bin 0 -> 60856 bytes
 .../modeling/model_design/images/agg/AGG-3.png     | Bin 0 -> 63213 bytes
 .../modeling/model_design/images/agg/AGG-4.png     | Bin 0 -> 75281 bytes
 .../modeling/model_design/images/agg/AGG-5.png     | Bin 0 -> 7453 bytes
 .../modeling/model_design/images/agg/AGG-6.png     | Bin 0 -> 10247 bytes
 .../model_design/images/agg/Hierarchy-2.png        | Bin 0 -> 20186 bytes
 .../model_design/images/agg/Hierarchy-3.png        | Bin 0 -> 169326 bytes
 .../model_design/images/agg/Hierarchy-4.png        | Bin 0 -> 27973 bytes
 .../model_design/images/agg/Mandatory-2.png        | Bin 0 -> 170616 bytes
 .../model_design/images/agg/Mandatory-3.png        | Bin 0 -> 46395 bytes
 .../model_design/images/agg/Mandatory-4.png        | Bin 0 -> 23467 bytes
 .../model_design/images/agg/advanced-EN-01.png     | Bin 0 -> 205276 bytes
 .../model_design/images/agg/advanced-EN-02.png     | Bin 0 -> 209971 bytes
 .../modeling/model_design/images/agg/advanced.png  | Bin 0 -> 301048 bytes
 .../model_design/images/agg/agg-group-1.png        | Bin 0 -> 183833 bytes
 .../model_design/images/agg/agg-group-2.png        | Bin 0 -> 100574 bytes
 .../model_design/images/agg/agg-group-3.png        | Bin 0 -> 61438 bytes
 .../model_design/images/agg/agg-group-4.png        | Bin 0 -> 66509 bytes
 .../model_design/images/agg/agg_detail.png         | Bin 0 -> 201318 bytes
 .../model_design/images/agg/agg_index_2.png        | Bin 0 -> 121116 bytes
 .../model_design/images/agg/agg_measure.png        | Bin 0 -> 247240 bytes
 .../docs/modeling/model_design/images/agg_1.png    | Bin 0 -> 303722 bytes
 .../docs/modeling/model_design/images/agg_2.png    | Bin 0 -> 307027 bytes
 .../modeling/model_design/images/agg_measure.png   | Bin 0 -> 247240 bytes
 .../images/auto_modeling/convert_no.en.png         | Bin 0 -> 147334 bytes
 .../images/auto_modeling/convert_or_not.en.png     | Bin 0 -> 347569 bytes
 .../images/auto_modeling/convert_yes_model.en.png  | Bin 0 -> 150441 bytes
 .../auto_modeling/convert_yes_recommend_en.png     | Bin 0 -> 166435 bytes
 .../images/auto_modeling/import_sql.en.png         | Bin 0 -> 358875 bytes
 .../images/computed_column/cc_en_1.png             | Bin 0 -> 370515 bytes
 .../images/computed_column/cc_en_10.png            | Bin 0 -> 314249 bytes
 .../images/computed_column/cc_en_11.png            | Bin 0 -> 258431 bytes
 .../images/computed_column/cc_en_2.png             | Bin 0 -> 92436 bytes
 .../images/computed_column/cc_en_3.png             | Bin 0 -> 153929 bytes
 .../images/computed_column/cc_en_4.png             | Bin 0 -> 101795 bytes
 .../images/computed_column/cc_en_5.png             | Bin 0 -> 252495 bytes
 .../images/computed_column/cc_en_6.png             | Bin 0 -> 145607 bytes
 .../images/computed_column/cc_en_7.png             | Bin 0 -> 369282 bytes
 .../images/computed_column/cc_en_8.png             | Bin 0 -> 91059 bytes
 .../computed_column/computed_column_en.7.png       | Bin 0 -> 214934 bytes
 .../computed_column/computed_column_en.8.png       | Bin 0 -> 472530 bytes
 .../images/count_distinct/cd_measures.png          | Bin 0 -> 128445 bytes
 .../images/count_distinct/cd_measures_add.1.png    | Bin 0 -> 73495 bytes
 .../images/count_distinct/cd_measures_add.2.png    | Bin 0 -> 99761 bytes
 .../images/count_distinct/cd_measures_add.3.png    | Bin 0 -> 120209 bytes
 .../images/count_distinct/cd_measures_add.4.png    | Bin 0 -> 33140 bytes
 .../images/count_distinct/cd_measures_add.5.png    | Bin 0 -> 174009 bytes
 .../images/count_distinct/cd_measures_add.7.png    | Bin 0 -> 12909 bytes
 .../images/count_distinct/cd_measures_add.8.png    | Bin 0 -> 10685 bytes
 .../images/count_distinct/cd_measures_add.9.png    | Bin 0 -> 10209 bytes
 .../images/count_distinct/cd_measures_add.png      | Bin 0 -> 101130 bytes
 .../count_distinct/cd_measures_add_precisely.png   | Bin 0 -> 88993 bytes
 .../images/count_distinct/cd_measures_edit.png     | Bin 0 -> 99761 bytes
 .../images/count_distinct/cd_meausres_add.6.png    | Bin 0 -> 10100 bytes
 .../images/count_distinct/wd_datasample.png        | Bin 0 -> 68299 bytes
 .../modeling/model_design/images/import_sql.en.jpg | Bin 0 -> 359231 bytes
 .../docs/modeling/model_design/images/index_1.png  | Bin 0 -> 475186 bytes
 .../modeling/model_design/images/mdc/index_mdc.png | Bin 0 -> 1696743 bytes
 .../modeling/model_design/images/mdc/intro_mdc.png | Bin 0 -> 236778 bytes
 .../model_design/images/mdc/single_mdc.png         | Bin 0 -> 181062 bytes
 .../modeling/model_design/images/mdc/total_mdc.png | Bin 0 -> 213552 bytes
 .../modeling/model_design/images/model_SCD1.png    | Bin 0 -> 110272 bytes
 .../modeling/model_design/images/model_SCD2.png    | Bin 0 -> 96419 bytes
 .../images/model_check/24_model_diagnose_1.png     | Bin 0 -> 126615 bytes
 .../images/model_check/24_model_diagnose_2.png     | Bin 0 -> 102317 bytes
 .../images/model_check/24_model_diagnose_4.png     | Bin 0 -> 108428 bytes
 .../images/model_check/24_model_diagnose_6.png     | Bin 0 -> 89494 bytes
 .../images/model_check/25_model_check.png          | Bin 0 -> 99835 bytes
 .../images/model_check/25_model_save.png           | Bin 0 -> 121755 bytes
 .../model_design/images/model_design/add_table.png | Bin 0 -> 139656 bytes
 .../model_design/images/model_design/dimension.png | Bin 0 -> 102709 bytes
 .../images/model_design/dimension_2.png            | Bin 0 -> 69815 bytes
 .../model_design/images/model_design/join.png      | Bin 0 -> 106888 bytes
 .../model_design/images/model_design/measure.png   | Bin 0 -> 63310 bytes
 .../model_design/images/model_design/measure_2.png | Bin 0 -> 63618 bytes
 .../model_design/images/model_design/measure_3.png | Bin 0 -> 252965 bytes
 .../images/model_design/model_edit_page.png        | Bin 0 -> 260928 bytes
 .../model_design/model_filter_condition.en.png     | Bin 0 -> 71666 bytes
 .../images/model_design/model_result.png           | Bin 0 -> 156227 bytes
 .../images/model_design/partition_en.png           | Bin 0 -> 93138 bytes
 .../images/model_design/sync_hive_comment_en.png   | Bin 0 -> 504179 bytes
 .../images/percentile_approximate/1.en.png         | Bin 0 -> 259223 bytes
 .../percentile_approximate/cube_query.en.png       | Bin 0 -> 227126 bytes
 .../percentile_approximate/return_type.en.png      | Bin 0 -> 85870 bytes
 .../modeling/model_design/images/review_model.png  | Bin 0 -> 131496 bytes
 .../scd2/historical_dimension_table_switch.png     | Bin 0 -> 47086 bytes
 .../model_design/images/scd2/model_SCD2_5x.png     | Bin 0 -> 265324 bytes
 .../scd2/model_historical_dimension_table_scd2.png | Bin 0 -> 95690 bytes
 .../model_historical_dimension_table_scd2_join.png | Bin 0 -> 151089 bytes
 .../images/table_index/table_index_disable.png     | Bin 0 -> 85867 bytes
 .../images/table_index/table_index_enable.png      | Bin 0 -> 364900 bytes
 .../images/table_index/table_index_encode.png      | Bin 0 -> 230346 bytes
 .../images/table_index/table_index_index.png       | Bin 0 -> 248221 bytes
 .../images/table_index/table_index_sortby.png      | Bin 0 -> 273821 bytes
 .../modeling/model_design/images/table_index_1.png | Bin 0 -> 177044 bytes
 .../modeling/model_design/images/table_index_2.png | Bin 0 -> 169186 bytes
 .../modeling/model_design/images/table_index_3.png | Bin 0 -> 305850 bytes
 .../docs/modeling/model_design/images/topN_1.png   | Bin 0 -> 10172 bytes
 .../model_design/images/topN_measure_edit.png      | Bin 0 -> 102494 bytes
 website/docs/modeling/model_design/intro.md        |  17 +
 .../model_design/measure_design/collect_set.md     |  69 ++++
 .../modeling/model_design/measure_design/corr.md   |  53 +++
 .../measure_design/count_distinct_bitmap.md        |  46 +++
 .../count_distinct_case_when_expr.md               |  73 ++++
 .../measure_design/count_distinct_hllc.md          |  51 +++
 .../measure_design/images/add_collect_set.png      | Bin 0 -> 68039 bytes
 .../images/cd_measures_add_precisely.png           | Bin 0 -> 83925 bytes
 .../measure_design/images/cd_measures_edit.png     | Bin 0 -> 88159 bytes
 .../measure_design/images/collect_result.png       | Bin 0 -> 280978 bytes
 .../measure_design/images/concatws_result.png      | Bin 0 -> 278352 bytes
 .../measure_design/images/corr_add_measure.png     | Bin 0 -> 24368 bytes
 .../measure_design/images/corr_edit_measure.png    | Bin 0 -> 25500 bytes
 .../measure_design/images/corr_query.png           | Bin 0 -> 11558 bytes
 .../images/percentile_approximate.png              | Bin 0 -> 92465 bytes
 .../measure_design/images/percentile_result.png    | Bin 0 -> 211818 bytes
 .../images/percentile_result_hive.png              | Bin 0 -> 189696 bytes
 .../measure_design/images/topN_en_measure_edit.jpg | Bin 0 -> 105031 bytes
 .../measure_design/images/topn_result.png          | Bin 0 -> 159569 bytes
 .../modeling/model_design/measure_design/intro.md  |  37 ++
 .../measure_design/percentile_approx.md            |  68 ++++
 .../model_design/measure_design/sum_expression.md  | 106 ++++++
 .../modeling/model_design/measure_design/topn.md   |  66 ++++
 .../model_design/precompute_join_relations.md      |  95 +++++
 .../model_design/slowly_changing_dimension.md      |  96 +++++
 website/docs/modeling/model_design/table_index.md  |  68 ++++
 website/docs/monitor/images/job_diagnosis_web.png  | Bin 0 -> 92222 bytes
 website/docs/monitor/images/job_id.png             | Bin 0 -> 120640 bytes
 website/docs/monitor/images/job_log.png            | Bin 0 -> 494702 bytes
 website/docs/monitor/images/job_settings.png       | Bin 0 -> 30531 bytes
 website/docs/monitor/images/job_status.png         | Bin 0 -> 96180 bytes
 website/docs/monitor/images/job_type.png           | Bin 0 -> 83019 bytes
 website/docs/monitor/intro.md                      |  31 ++
 website/docs/monitor/job_concept_settings.md       | 148 ++++++++
 website/docs/monitor/job_diagnosis.md              |  70 ++++
 website/docs/monitor/job_exception_resolve.md      |  89 +++++
 website/docs/monitor/job_operations.md             |  72 ++++
 .../project-operation/images/service_state.png     | Bin 0 -> 58508 bytes
 .../project-operation/images/storage_quota.png     | Bin 0 -> 34171 bytes
 .../images/{toolbar.en.png => toolbar.png}         | Bin
 .../docs/operations/project-operation/toolbar.md   |  55 +++
 website/docs/query/insight/intro.md                |   2 +-
 website/docs/query/optimization/query_enhanced.md  |   2 -
 website/docs/query/optimization/segment_pruning.md |   3 -
 website/docs/query/pushdown/intro.md               |   2 -
 .../query/pushdown/pushdown_to_embedded_spark.md   |   2 -
 website/docs/quickstart/expert_mode_tutorial.md    | 186 +++++++++
 website/docs/quickstart/images/agg_group.png       | Bin 0 -> 254411 bytes
 website/docs/quickstart/images/dataload.png        | Bin 0 -> 174744 bytes
 website/docs/quickstart/images/dataset.png         | Bin 0 -> 221519 bytes
 website/docs/quickstart/images/datasource.png      | Bin 0 -> 236126 bytes
 website/docs/quickstart/images/job.png             | Bin 0 -> 132111 bytes
 website/docs/quickstart/images/model.png           | Bin 0 -> 261649 bytes
 website/docs/quickstart/images/query_result.png    | Bin 0 -> 169101 bytes
 website/docs/quickstart/intro.md                   |  28 ++
 website/docs/quickstart/sample_dataset.md          | 159 ++++++++
 website/docs/snapshot/snapshot_management.md       |   3 -
 website/sidebars.js                                | 172 +++++++++
 277 files changed, 3994 insertions(+), 14 deletions(-)

diff --git a/website/docs/datasource/data_sampling.md b/website/docs/datasource/data_sampling.md
new file mode 100644
index 0000000000..1223190fb6
--- /dev/null
+++ b/website/docs/datasource/data_sampling.md
@@ -0,0 +1,74 @@
+---
+title: Data Sampling
+language: en
+sidebar_label: Data Sampling
+pagination_label: Data Sampling
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - data sampling
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+Kylin provides the data sampling function to facilitate table data analysis. With data sampling, you can collect table characteristics, such as cardinality, max value, and min value for each column, to improve [model designing](../modeling/intro.md). 
+
+## Known limitations
+
+Sampling on Kafka data source is currently not supported. 
+
+## Operation steps
+
+Kylin supports data sampling during data loading. If you want to manually sample data, follow the steps below: 
+
+1. Log in to Kylin as any of the following roles:
+   - System admin
+   - Management or Admin role of the target project
+
+2. In the project list at the top of the page, select the target project. 
+
+   Create a project if you have not created any projects yet. For more information, see [Create project](../operations/project-operation/project_management.md).
+
+3. In the left navigation panel, click **Data Assets** > **Data Source**.
+
+4. In the **Data Source** section, select the target table, and click ![](images/sample.png) in the right corner of the page.
+
+   ![](images/target_table.png)
+
+5. In the pop-up dialog box, enter the number of rows for sampling (from 10,000 to 20,000,000) and click **Submit**. 
+
+   The accuracy of sampled results will depend on the number of rows that are sampled, but more rows will also request more resources and time. You can set the row number based on the actual requirements. To check the progress of the sampling job, click **Monitor** > **Batch Job** in the left navigation panel. 
+
+6. (Optional) View sampled results.
+
+   - Click the **Columns** tab to view statistical information, such as the number of sampled rows (estimated value), and the cardinality, min value and max value for each sampled column. 
+
+     ![](images/ssb_column.png)
+
+   - Click the **Sample Data** tab to view the detailed information of the first 10 records. 
+
+     ![](images/sample_data.png)
+
+### FAQ
+
+Question: Why are the Chinese comments garbled in the sampled results?
+
+Answer: This issue is often caused by improper encoding settings. Please confirm whether any Chinese comments in the source Hive table are garbled via the Hive client. If yes, please modify the encoding in the MySQL metastore database. Below we use the [CDH](../../installation/install_uninstall/install_on_cdh.en.md) platform as an example to show how to modify the encoding: 
+
+1. Log in to the CDH server. 
+2. Run the `mysql -uroot -p` command and enter your password. 
+3. Run the `use metastore;` command to switch to the Metastore database. 
+4. Modify the encoding of the following columns to utf8: 
+   - Column COMMENT in COLUMNS_V2
+   - Column PARAM_VALUE in TABLE_PARAMS
+   - Column PKEY_COMMENT in PARTITION_KEYS
+
+For more information about commands, see [ALTER TABLE Statement](https://dev.mysql.com/doc/refman/5.7/en/alter-table.html). 
+
+### See also
+
+[Model](../modeling/intro.md)
+
diff --git a/website/docs/datasource/images/hive_datasource.png b/website/docs/datasource/images/hive_datasource.png
new file mode 100644
index 0000000000..9b62f6a563
Binary files /dev/null and b/website/docs/datasource/images/hive_datasource.png differ
diff --git a/website/docs/datasource/images/sample.png b/website/docs/datasource/images/sample.png
new file mode 100644
index 0000000000..4ae1822021
Binary files /dev/null and b/website/docs/datasource/images/sample.png differ
diff --git a/website/docs/datasource/images/sample_data.png b/website/docs/datasource/images/sample_data.png
new file mode 100644
index 0000000000..579acb35c8
Binary files /dev/null and b/website/docs/datasource/images/sample_data.png differ
diff --git a/website/docs/datasource/images/select_dataset.png b/website/docs/datasource/images/select_dataset.png
new file mode 100644
index 0000000000..69158cf5e4
Binary files /dev/null and b/website/docs/datasource/images/select_dataset.png differ
diff --git a/website/docs/datasource/images/ssb_column.png b/website/docs/datasource/images/ssb_column.png
new file mode 100644
index 0000000000..af43934247
Binary files /dev/null and b/website/docs/datasource/images/ssb_column.png differ
diff --git a/website/docs/datasource/images/target_table.png b/website/docs/datasource/images/target_table.png
new file mode 100644
index 0000000000..b9c0439f7d
Binary files /dev/null and b/website/docs/datasource/images/target_table.png differ
diff --git a/website/docs/datasource/import_hive.md b/website/docs/datasource/import_hive.md
new file mode 100755
index 0000000000..0ad3df7627
--- /dev/null
+++ b/website/docs/datasource/import_hive.md
@@ -0,0 +1,74 @@
+---
+title: Hive data source
+language: en
+sidebar_label: Hive data source
+pagination_label: Hive data source
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - hive data source
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+In the context of digital transformation, companies need to dig out the most valuable data to support their business decisions and growth, which is really hard to achieve with traditional data warehouses, as they can only provide minute-level or even hour-level query latency when dealing with massive data. 
+
+Leveraging the capability of Apache Hive, Kylin has effectively solved the query latency issue with its sub-second query response time on PB-level data. 
+
+Apache Hive is a distributed, fault-tolerant data warehouse software that enables analytics at a massive scale. With Apache Hive to map structured data into tables and precomputation offered by Kylin, you can easily identify and manage your business' most valuable data, and uncover new insights from any size dataset, at any time, and from anywhere. 
+
+This article introduces how to load Hive data source to a Kylin [project](../operations/project-operation/project_management.md) for [model designing](../modeling/intro.md) and [data analysis](../query/intro.md). 
+
+### Notes
+
+- Hive data types including Map, Array, Struct, and Binary are not supported. Columns of these data types will be skipped during data loading. 
+
+- Views with user-defined functions (UDF) in Hive 3 are not supported.
+
+### Operation steps
+
+1. Log in to Kylin as any of the following roles:
+   - System admin
+   - Management or Admin role of the target project
+
+2. In the project list at the top of the page, select the target project.
+
+   Create a new project if you have not created any projects yet. For more information, see [Create project](../operations/project-operation/project_management.md).
+
+3. In the left navigation panel, click **Data Assets** > **Data Source**.
+
+4. Click **Add data source**. In the **Add New Source** dialog box, select **Hive** and click **Next**. ![](images/hive_datasource.png)
+
+5. Select the target database/table (use the filter to quickly locate), and then click **Load**.   
+
+6. The example below uses Kylin's [sample dataset](../quickstart/sample_dataset.md) to show how to load a Hive data source.
+
+   ![](images/select_dataset.png)
+
+   > [!NOTE]
+   >
+   > [Data sampling](data_sampling.md) is enabled at the table level by default. Kylin uses table sampling to collect statistical information of source tables, such as column cardinality and formats, so you can check the distribution for column values for better model designing.
+
+### FAQ
+
+- Question: Why cannot I find the Hive database/table prepared in advance during data loading?
+
+  Answer: Kylin will obtain the source table metadata periodically. If a table cannot be found during data loading, it's likely that the source table metadata is changed. To solve this issue, click **Refresh now** so the system can get the latest metadata information.
+
+- Question: How to load Hive transactional tables? 
+
+  Answer: Before loading, you need to add `kylin.build.resource.read-transactional-table-enabled=true` to the configuration file, and configure parameter `kylin.source.hive.beeline-params`. For more information, see [Configure basic parameters](../configuration/configuration.md). 
+
+- Question: Besides Hive data source, what other data sources does Kylin support?
+
+  Answer: Kylin supports various data sources, including Hive and [Kafka](import_kafka.md). Meanwhile, you can also [connect to other data sources by extending the data source SDK](../developer/rdbms_sdk/rdbms_sdk.md), for example, GBase. 
+
+
+
+### Next step 
+
+[Model](../modeling/intro.md)
+
diff --git a/website/docs/datasource/intro.md b/website/docs/datasource/intro.md
new file mode 100644
index 0000000000..84f1a87a31
--- /dev/null
+++ b/website/docs/datasource/intro.md
@@ -0,0 +1,26 @@
+---
+title: Load data source
+language: en
+sidebar_label: Load data source
+pagination_label: Load data source
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - load data source
+    - data source
+    - datasource
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+
+Kylin supports the integration of multiple data sources, such as Hive. You can connect different data sources to the platform and expose a unified query interface using Kylin, which shields the technical details of different data sources. It also creates a uniform business semantic layer that frees users from concerns about the technical complexity and implementation of the underlying data source.
+
+Click the links below to learn how to load different types of data sources.
+
+- [Hive data source](import_hive.md)
+
+
diff --git a/website/docs/deployment/on-premises/prerequisite.md b/website/docs/deployment/on-premises/prerequisite.md
index 7285b35bed..56237cf317 100644
--- a/website/docs/deployment/on-premises/prerequisite.md
+++ b/website/docs/deployment/on-premises/prerequisite.md
@@ -29,7 +29,6 @@ Prior to installing Kylin, please check the following prerequisites are met.
     - [Recommended Hardware Configuration](#hardware)
     - [Recommended Linux Distribution](#linux)
     - [Recommended Client Configuration](#client)
-- [License](#license)
 
 
 
diff --git a/website/docs/modeling/data_modeling.md b/website/docs/modeling/data_modeling.md
new file mode 100755
index 0000000000..052b77c27a
--- /dev/null
+++ b/website/docs/modeling/data_modeling.md
@@ -0,0 +1,64 @@
+---
+title: Model design overview
+language: en
+sidebar_label: Model design overview
+pagination_label: Model design overview
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+  - model design overview
+draft: true
+last_update:
+  date: 08/17/2022
+---
+
+Kylin model consists of multiple tables and their join relations. Kylin follows multidimensional modeling theory to build star or snowflake schemas based on tables. Kylin also leverages pre-computation technique and will reuse the pre-computed results to answer queries, so there's no need to traverse all data when there is a query, thus achieving sub-second query times on PB-level data.
+
+### Challenges when dealing with massive data
+
+When analyzing massive data, there are some techniques to speed up computing and storage, but they cannot change the time complexity of query, that is, query latency and data volume are linearly dependent. 
+
+If it takes 1 minute to query 100 million entries of data records, querying 10 billion data entries will take about 1 hour and 40 minutes. When companies want to analyze all business data piled up over the years or to add complexity to query, say, with more dimensions, queries will be running extremely slow or even time out. 
+
+### Accelerate query with Kylin pre-computation
+
+Kylin leverages pre-computation to avoid the computing pressure brought by the growing data volume. That is, Kylin will precompute the combinations of defined model dimensions and then store the aggregated results as indexes to shorten query latency. In addition, Kylin uses parallel computing and columnar storage techniques to improve computing and storage speed.  
+
+![Reduce IO](images/reduceio.png)
+
+
+With pre-computation, the number of indexes will be determined by the dimension cardinality only, and will no longer undergo exponential growth as data volume increases. Taking the data analysis of online transactions as an example, with Kylin pre-computation, even if the volume of transaction data increases by 10 times, the query speed against the same analytical dimensions changes little. The computing time complexity can be kept at O(1), helping enterprises to analyze data more efficiently. 
+
+![Response Time Datavolume](images/responsetime_datavolume.png)
+
+### How to design model and indexes
+
+#### Manual modeling 
+
+In addition to intelligent modeling, Kylin also supports users to design their own models and indexes based on their business needs. Kylin provides step-by-step guidance on how to complete basic model settings, including dimensions, measures, join relationships, and indexes. For details, see [Manual modeling](manual_modeling.md). 
+
+#### Advanced model design 
+
+Kylin offers various advanced features around models and indexes to help users quickly dig out the most valuable data. These features include: 
+
+- Accelerated model design: Kylin offers built-in [advanced measures](measure_design/intro.md) like count distinct and Top N to speed up modeling.  
+
+For more information, see [Advanced model design](intro.md). 
+
+### Basic concepts 
+
+Kylin follows multidimensional modeling theory and decomposes complex concepts into specific functional modules to make modeling easier. Below are some of the basic concepts used in Kylin: 
+
+- Dimension: Dimension is a perspective of viewing data, which can be used to describe object attributes or characteristics, for example, product category.
+
+- Measure: Measure is an aggregated sum, which is usually a continuous value, for example, product sales. 
+
+- Model: Model consists of multiple tables and their join relations, as well as defined dimensions and measures.
+
+- Pre-computation: Pre-computation is the process of aggregating data based on model dimension combinations and of storing the results as indexes to accelerate data query.
+
+- Index: Index is used to accelerate data query. Indexes are divided into:
+  - Aggregate Index: Aggregate index is an aggregated combination of multiple dimensions and measures, and can be used to answer aggregate queries such as total sales for a given year.
+  - Table Index: Table index is a multi-level index in a wide table and can be used to answer detailed queries such as the last 100 transactions of a certain user. 
diff --git a/website/docs/modeling/images/add_dimention.gif b/website/docs/modeling/images/add_dimention.gif
new file mode 100755
index 0000000000..ffecf678a3
Binary files /dev/null and b/website/docs/modeling/images/add_dimention.gif differ
diff --git a/website/docs/modeling/images/add_join_relations.png b/website/docs/modeling/images/add_join_relations.png
new file mode 100644
index 0000000000..c5ee67d123
Binary files /dev/null and b/website/docs/modeling/images/add_join_relations.png differ
diff --git a/website/docs/modeling/images/add_measure.png b/website/docs/modeling/images/add_measure.png
new file mode 100644
index 0000000000..15ab8d2932
Binary files /dev/null and b/website/docs/modeling/images/add_measure.png differ
diff --git a/website/docs/modeling/images/add_model_set.png b/website/docs/modeling/images/add_model_set.png
new file mode 100644
index 0000000000..97cb3168f6
Binary files /dev/null and b/website/docs/modeling/images/add_model_set.png differ
diff --git a/website/docs/modeling/images/add_tables.png b/website/docs/modeling/images/add_tables.png
new file mode 100644
index 0000000000..0070ac5e89
Binary files /dev/null and b/website/docs/modeling/images/add_tables.png differ
diff --git a/website/docs/modeling/images/create_join_relations.png b/website/docs/modeling/images/create_join_relations.png
new file mode 100644
index 0000000000..1b6720f735
Binary files /dev/null and b/website/docs/modeling/images/create_join_relations.png differ
diff --git a/website/docs/modeling/images/model_list.png b/website/docs/modeling/images/model_list.png
new file mode 100644
index 0000000000..cc2bbbd064
Binary files /dev/null and b/website/docs/modeling/images/model_list.png differ
diff --git a/website/docs/modeling/images/model_overview/dimensions.png b/website/docs/modeling/images/model_overview/dimensions.png
new file mode 100644
index 0000000000..9528c0a163
Binary files /dev/null and b/website/docs/modeling/images/model_overview/dimensions.png differ
diff --git a/website/docs/modeling/images/model_overview/er.png b/website/docs/modeling/images/model_overview/er.png
new file mode 100644
index 0000000000..84119eb4db
Binary files /dev/null and b/website/docs/modeling/images/model_overview/er.png differ
diff --git a/website/docs/modeling/images/model_overview/measures.png b/website/docs/modeling/images/model_overview/measures.png
new file mode 100644
index 0000000000..9d9e8a63c9
Binary files /dev/null and b/website/docs/modeling/images/model_overview/measures.png differ
diff --git a/website/docs/modeling/images/model_overview/unfold_model.png b/website/docs/modeling/images/model_overview/unfold_model.png
new file mode 100644
index 0000000000..6ba03f4df0
Binary files /dev/null and b/website/docs/modeling/images/model_overview/unfold_model.png differ
diff --git a/website/docs/modeling/images/modellist_more_info.png b/website/docs/modeling/images/modellist_more_info.png
new file mode 100644
index 0000000000..b47f6b9be2
Binary files /dev/null and b/website/docs/modeling/images/modellist_more_info.png differ
diff --git a/website/docs/modeling/images/reduceio.png b/website/docs/modeling/images/reduceio.png
new file mode 100644
index 0000000000..2eb123fb1e
Binary files /dev/null and b/website/docs/modeling/images/reduceio.png differ
diff --git a/website/docs/modeling/images/responsetime_datavolume.png b/website/docs/modeling/images/responsetime_datavolume.png
new file mode 100644
index 0000000000..78086afd35
Binary files /dev/null and b/website/docs/modeling/images/responsetime_datavolume.png differ
diff --git a/website/docs/modeling/images/save_load_method.png b/website/docs/modeling/images/save_load_method.png
new file mode 100644
index 0000000000..8158552f50
Binary files /dev/null and b/website/docs/modeling/images/save_load_method.png differ
diff --git a/website/docs/modeling/images/star_model_created.png b/website/docs/modeling/images/star_model_created.png
new file mode 100644
index 0000000000..db546dbd4e
Binary files /dev/null and b/website/docs/modeling/images/star_model_created.png differ
diff --git a/website/docs/modeling/images/switch_to_fact_table.png b/website/docs/modeling/images/switch_to_fact_table.png
new file mode 100644
index 0000000000..f6b5e48dff
Binary files /dev/null and b/website/docs/modeling/images/switch_to_fact_table.png differ
diff --git a/website/docs/modeling/images/time_format.png b/website/docs/modeling/images/time_format.png
new file mode 100644
index 0000000000..690c8c95fc
Binary files /dev/null and b/website/docs/modeling/images/time_format.png differ
diff --git a/website/docs/modeling/load_data/build_index.md b/website/docs/modeling/load_data/build_index.md
new file mode 100755
index 0000000000..5bc92388ab
--- /dev/null
+++ b/website/docs/modeling/load_data/build_index.md
@@ -0,0 +1,47 @@
+---
+title: Build Index
+language: en
+sidebar_label: Build Index
+pagination_label: Build Index
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - build index
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+As the business scenario changes, some of the indexes in the model may only need to be retained for the latest months to save building and storage costs. Therefore, Kylin has provided a more flexible way to build indexes since version 4.2.
+
+
+### Build Index
+
+In the **Index Overview** tab, we can see the index list and some basic information. In the **Index List**, we can filter some indexes by the keyword or ids and then only build them in selected segments. For example, some new columns are added in the source table because of the business demands. Therefore, we need to add some new indexes for those columns in the latest one month for analysis and cost saving. As shown in the figure below, we can select all the new and NO BUILD indexes, an [...]
+
+![Build Index](images/load_by_date/build_index.png)
+
+After that, please select the segment with the latest month and click the Build Index button to generate the building job. If you want to build the segments concurrently to improve the efficiency, you can also check the **Generate multiple segments in parallel** box. Then, the system will generate multiple jobs according to the number of selected segments.
+
+![Build Index](images/load_by_date/build_index_by_segment.png)
+
+### Delete Index
+
+Similar to building indexes, you can also delete some indexes from selected segments, for example, deleting some infrequently used indexes from last year. As shown below, we can choose some of the indexes and click the **Delete** button, then choose to delete them from all segments or only some of them.
+
+Note: If the indexes are deleted from segments, it may influence the query performance because some of query may route to the pushdown engine due to the lack of index.
+
+![Delete Index](images/load_by_date/delete_index.png)
+
+### Build All Index
+
+To support more flexible index building, different segments may contain different indexes. In order to ensure stable query performance, we recommend that you build all indexes across all segments after a period of time. Therefore, if the indexes are incomplete, you can quickly build all indexes by clicking the icon after the index data range.
+
+![Build All Index](images/load_by_date/build_all_index.png)
+
+As shown below, all the segments with incomplete indexes will be displayed after clicking the icon. Then, you can select all the segments and click **Build Index** to ensure that all segments contain all indexes.
+
+![Build All Index](images/load_by_date/build_all_index2.png)
+
diff --git a/website/docs/modeling/load_data/by_date.md b/website/docs/modeling/load_data/by_date.md
new file mode 100755
index 0000000000..32cef99cec
--- /dev/null
+++ b/website/docs/modeling/load_data/by_date.md
@@ -0,0 +1,68 @@
+---
+title: Incremental Load
+language: en
+sidebar_label: Incremental Load
+pagination_label: Incremental Load
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - incremental load
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+As your business data grows over time, you can choose to load data by date / time. The main contents are as follows:
+
+### <span id="expert">Load by Date/Time</span>
+
+If you have set a time partition column for your model, you can load data by date / time each time on the **Data Asset -> Model** page.
+
+1. **First Load**
+   Open Kylin web UI and access the project. Select the model that needs to load in the model list page. 
+
+   **Step 1:** Click the **Build Index** button.
+
+   ![Load Data](images/load_by_date/load_data.png)
+
+   **Step 2:**  Select the load range in the pop-up window and click the **Incremental Load** button. This action will trigger the job of **Load Data**.
+
+   >  **Note**: 
+   >
+   > - You can click the icon on the right side of the load range to automatically detect the latest available range. When your mouse hovers over the icon, **Detect available range** is displayed.
+   > - When you load historical data at the first time and the data volume is too large, it may lead to a long loading time. Please be sure to set the load range according to your data volume, model complexity, and available resources.
+
+   ![Load Range](images/load_by_date/notice_2.png)
+
+
+
+### Known limitations
+The start time of a segment should be greater than `1970-01-02`.
+
+
+   **Step 3:** After submission, go to the **Monitor -> Job** page, a list of running jobs will be displayed. The first job is the job we just submitted and **Data Range** is displayed as the selected load range in step 2.
+
+   **Step 4:** When all steps are complete, the status of the job will become **Finished**. You can view the details in the model list. There is a segment in the **Segment** tab and it is marked with the **Start Time** and **End Time**.
+
+   ![Load Data](images/load_by_date/load_ok.png)
+
+2. **Incremental Load**
+
+   After the first segment is built, we can build more segments incrementally to accommodate new data. The time range of two segments cannot overlap.
+
+   The steps for incremental load are consistent with the steps described above. Click the **Build Index** button then select the load range in the pop-up window. To ensure continuity, a new segment always starts from the end of the last segment. 
+
+   When load completes, go to the model detail page and check there are two segments under the model.
+
+   ![Load by Date/Time](images/load_by_date/load_twice.png)
+
+3. **Add Segment**
+   
+   Besides the above methods, you can also increase the model data range by adding a new segment. Click the **+ Segment** button under segment list. Then click the **Save and Build Index** button in the pop-up window. After that, a building job will be submitted and the third segment will be shown when the job is finished.
+
+   Note: If you want to use this function, you need to enable **Creating Reserved Segments**. Please refer to [Project Settings](../../operations/project-operation/project_settings.md) for more information.
+   
+   ![Add segment](images/load_by_date/add_segment.png)
+   ![Segment list](images/load_by_date/model_list_3.png)
diff --git a/website/docs/modeling/load_data/full_build.md b/website/docs/modeling/load_data/full_build.md
new file mode 100644
index 0000000000..dc4adcf4ee
--- /dev/null
+++ b/website/docs/modeling/load_data/full_build.md
@@ -0,0 +1,46 @@
+---
+title: Full Load
+language: en
+sidebar_label: Full Load
+pagination_label: Full Load
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - full load
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+
+## Full Load
+
+If you want to load all the data in the source table, you can choose full load. The main contents are as follows:
+
+- [Full Load](#expert)
+
+### <span id="expert">Full Load</span>
+
+If you do not set a time partition column for your model, every load will be a full load.
+
+You cannot merge segments in a full load type model since there should be only one segment.
+
+Here we will introduce how to do a full load in the Web UI:
+
+1. Select the model that needs the full load in the model list. Click the **Build Index** button.
+
+   ![Load Data](images/full_load/load_data.png)
+
+2. You will be prompted to load all data, including the data already loaded in the model.
+
+   ![Notice](images/full_load/notice.png)
+
+   > **Note**: If you load data for a model for the first time, the storage size in the above prompt will be 0.00 KB because the model has not been loaded (there is no data in the model). 
+
+3. After that, you can view the build index job via the **Monitor -> Job** page.
+
+4. When the data is loaded, you can view the details in the model list. There is only one segment in the **Segment** tab, and it is marked as full load.
+
+   ![Full Load](images/full_load/full_load.png)
diff --git a/website/docs/modeling/load_data/images/build_empty_index_en.png b/website/docs/modeling/load_data/images/build_empty_index_en.png
new file mode 100644
index 0000000000..1afc423fb3
Binary files /dev/null and b/website/docs/modeling/load_data/images/build_empty_index_en.png differ
diff --git a/website/docs/modeling/load_data/images/build_summary.png b/website/docs/modeling/load_data/images/build_summary.png
new file mode 100644
index 0000000000..2130345de0
Binary files /dev/null and b/website/docs/modeling/load_data/images/build_summary.png differ
diff --git a/website/docs/modeling/load_data/images/by_date/buildcube_0.png b/website/docs/modeling/load_data/images/by_date/buildcube_0.png
new file mode 100644
index 0000000000..46061548e2
Binary files /dev/null and b/website/docs/modeling/load_data/images/by_date/buildcube_0.png differ
diff --git a/website/docs/modeling/load_data/images/by_date/buildcube_1.png b/website/docs/modeling/load_data/images/by_date/buildcube_1.png
new file mode 100644
index 0000000000..c836f0390c
Binary files /dev/null and b/website/docs/modeling/load_data/images/by_date/buildcube_1.png differ
diff --git a/website/docs/modeling/load_data/images/by_date/buildcube_2.png b/website/docs/modeling/load_data/images/by_date/buildcube_2.png
new file mode 100644
index 0000000000..64925b4136
Binary files /dev/null and b/website/docs/modeling/load_data/images/by_date/buildcube_2.png differ
diff --git a/website/docs/modeling/load_data/images/by_date/buildcube_3.png b/website/docs/modeling/load_data/images/by_date/buildcube_3.png
new file mode 100644
index 0000000000..18a538ae8c
Binary files /dev/null and b/website/docs/modeling/load_data/images/by_date/buildcube_3.png differ
diff --git a/website/docs/modeling/load_data/images/customize_build_save_model.png b/website/docs/modeling/load_data/images/customize_build_save_model.png
new file mode 100644
index 0000000000..0506f400ce
Binary files /dev/null and b/website/docs/modeling/load_data/images/customize_build_save_model.png differ
diff --git a/website/docs/modeling/load_data/images/customize_build_save_model_partition.png b/website/docs/modeling/load_data/images/customize_build_save_model_partition.png
new file mode 100644
index 0000000000..decef08704
Binary files /dev/null and b/website/docs/modeling/load_data/images/customize_build_save_model_partition.png differ
diff --git a/website/docs/modeling/load_data/images/full_build.png b/website/docs/modeling/load_data/images/full_build.png
new file mode 100644
index 0000000000..518764c362
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_build.png differ
diff --git a/website/docs/modeling/load_data/images/full_build_cube.png b/website/docs/modeling/load_data/images/full_build_cube.png
new file mode 100644
index 0000000000..2f19ee8621
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_build_cube.png differ
diff --git a/website/docs/modeling/load_data/images/full_build_segment_info.png b/website/docs/modeling/load_data/images/full_build_segment_info.png
new file mode 100644
index 0000000000..2888af1daf
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_build_segment_info.png differ
diff --git a/website/docs/modeling/load_data/images/full_load/full_load.png b/website/docs/modeling/load_data/images/full_load/full_load.png
new file mode 100644
index 0000000000..edf962a876
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_load/full_load.png differ
diff --git a/website/docs/modeling/load_data/images/full_load/full_load_smart.png b/website/docs/modeling/load_data/images/full_load/full_load_smart.png
new file mode 100644
index 0000000000..9b90493bf6
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_load/full_load_smart.png differ
diff --git a/website/docs/modeling/load_data/images/full_load/load_data.png b/website/docs/modeling/load_data/images/full_load/load_data.png
new file mode 100644
index 0000000000..752c5e2c57
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_load/load_data.png differ
diff --git a/website/docs/modeling/load_data/images/full_load/load_data_smart.png b/website/docs/modeling/load_data/images/full_load/load_data_smart.png
new file mode 100755
index 0000000000..f864f9de69
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_load/load_data_smart.png differ
diff --git a/website/docs/modeling/load_data/images/full_load/notice.png b/website/docs/modeling/load_data/images/full_load/notice.png
new file mode 100644
index 0000000000..411f62dc1d
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_load/notice.png differ
diff --git a/website/docs/modeling/load_data/images/full_load/notice_smart.png b/website/docs/modeling/load_data/images/full_load/notice_smart.png
new file mode 100644
index 0000000000..89f3aa3312
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_load/notice_smart.png differ
diff --git a/website/docs/modeling/load_data/images/full_load/refresh.png b/website/docs/modeling/load_data/images/full_load/refresh.png
new file mode 100644
index 0000000000..bef6076dc1
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_load/refresh.png differ
diff --git a/website/docs/modeling/load_data/images/full_load/refresh_smart.png b/website/docs/modeling/load_data/images/full_load/refresh_smart.png
new file mode 100644
index 0000000000..1af4d4945c
Binary files /dev/null and b/website/docs/modeling/load_data/images/full_load/refresh_smart.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/add_segment.png b/website/docs/modeling/load_data/images/load_by_date/add_segment.png
new file mode 100644
index 0000000000..2de3a5ad94
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/add_segment.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/build_all_index.png b/website/docs/modeling/load_data/images/load_by_date/build_all_index.png
new file mode 100644
index 0000000000..eed0ad4e55
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/build_all_index.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/build_all_index2.png b/website/docs/modeling/load_data/images/load_by_date/build_all_index2.png
new file mode 100644
index 0000000000..a624daf368
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/build_all_index2.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/build_index.png b/website/docs/modeling/load_data/images/load_by_date/build_index.png
new file mode 100644
index 0000000000..1f2a47b2f2
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/build_index.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/build_index_by_segment.png b/website/docs/modeling/load_data/images/load_by_date/build_index_by_segment.png
new file mode 100644
index 0000000000..c6d91f78f1
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/build_index_by_segment.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/delete_index.png b/website/docs/modeling/load_data/images/load_by_date/delete_index.png
new file mode 100644
index 0000000000..53a221f791
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/delete_index.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/job.png b/website/docs/modeling/load_data/images/load_by_date/job.png
new file mode 100644
index 0000000000..2d7a39a69f
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/job.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/job_smart.png b/website/docs/modeling/load_data/images/load_by_date/job_smart.png
new file mode 100644
index 0000000000..2d7a39a69f
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/job_smart.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/load_data.png b/website/docs/modeling/load_data/images/load_by_date/load_data.png
new file mode 100644
index 0000000000..77dfb17fad
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/load_data.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/load_ok.png b/website/docs/modeling/load_data/images/load_by_date/load_ok.png
new file mode 100644
index 0000000000..457638532b
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/load_ok.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/load_ok_2.png b/website/docs/modeling/load_data/images/load_by_date/load_ok_2.png
new file mode 100644
index 0000000000..f271a73cc6
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/load_ok_2.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/load_ok_smart.png b/website/docs/modeling/load_data/images/load_by_date/load_ok_smart.png
new file mode 100644
index 0000000000..120d8ffb20
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/load_ok_smart.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/load_twice.png b/website/docs/modeling/load_data/images/load_by_date/load_twice.png
new file mode 100644
index 0000000000..347f831cbf
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/load_twice.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/model_list_3.png b/website/docs/modeling/load_data/images/load_by_date/model_list_3.png
new file mode 100644
index 0000000000..77fea3f47d
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/model_list_3.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/notice_2.png b/website/docs/modeling/load_data/images/load_by_date/notice_2.png
new file mode 100644
index 0000000000..995b059a0e
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/notice_2.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/range_en.png b/website/docs/modeling/load_data/images/load_by_date/range_en.png
new file mode 100644
index 0000000000..5526316984
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/range_en.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/range_smart_2_en.png b/website/docs/modeling/load_data/images/load_by_date/range_smart_2_en.png
new file mode 100644
index 0000000000..6dbaa7be76
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/range_smart_2_en.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/range_smart_en.png b/website/docs/modeling/load_data/images/load_by_date/range_smart_en.png
new file mode 100644
index 0000000000..822fb51206
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/range_smart_en.png differ
diff --git a/website/docs/modeling/load_data/images/load_by_date/source_smart_en.png b/website/docs/modeling/load_data/images/load_by_date/source_smart_en.png
new file mode 100644
index 0000000000..7490c836cc
Binary files /dev/null and b/website/docs/modeling/load_data/images/load_by_date/source_smart_en.png differ
diff --git a/website/docs/modeling/load_data/images/segment_2.png b/website/docs/modeling/load_data/images/segment_2.png
new file mode 100644
index 0000000000..be65246ae4
Binary files /dev/null and b/website/docs/modeling/load_data/images/segment_2.png differ
diff --git a/website/docs/modeling/load_data/images/streaming/g.png b/website/docs/modeling/load_data/images/streaming/g.png
new file mode 100644
index 0000000000..c049b4e885
Binary files /dev/null and b/website/docs/modeling/load_data/images/streaming/g.png differ
diff --git a/website/docs/modeling/load_data/images/streaming/h.png b/website/docs/modeling/load_data/images/streaming/h.png
new file mode 100644
index 0000000000..907b67368e
Binary files /dev/null and b/website/docs/modeling/load_data/images/streaming/h.png differ
diff --git a/website/docs/modeling/load_data/images/streaming/i.png b/website/docs/modeling/load_data/images/streaming/i.png
new file mode 100644
index 0000000000..29a7805ea2
Binary files /dev/null and b/website/docs/modeling/load_data/images/streaming/i.png differ
diff --git a/website/docs/modeling/load_data/images/streaming/j.png b/website/docs/modeling/load_data/images/streaming/j.png
new file mode 100644
index 0000000000..4432bba537
Binary files /dev/null and b/website/docs/modeling/load_data/images/streaming/j.png differ
diff --git a/website/docs/modeling/load_data/images/streaming/k.png b/website/docs/modeling/load_data/images/streaming/k.png
new file mode 100644
index 0000000000..62b15380d8
Binary files /dev/null and b/website/docs/modeling/load_data/images/streaming/k.png differ
diff --git a/website/docs/modeling/load_data/images/streaming/l.png b/website/docs/modeling/load_data/images/streaming/l.png
new file mode 100644
index 0000000000..8187663277
Binary files /dev/null and b/website/docs/modeling/load_data/images/streaming/l.png differ
diff --git a/website/docs/modeling/load_data/images/streaming/m.png b/website/docs/modeling/load_data/images/streaming/m.png
new file mode 100644
index 0000000000..bc47bd73ad
Binary files /dev/null and b/website/docs/modeling/load_data/images/streaming/m.png differ
diff --git a/website/docs/modeling/load_data/images/streaming/n.png b/website/docs/modeling/load_data/images/streaming/n.png
new file mode 100644
index 0000000000..1297831d51
Binary files /dev/null and b/website/docs/modeling/load_data/images/streaming/n.png differ
diff --git a/website/docs/modeling/load_data/images/time_axis.png b/website/docs/modeling/load_data/images/time_axis.png
new file mode 100644
index 0000000000..9a7b7d7d15
Binary files /dev/null and b/website/docs/modeling/load_data/images/time_axis.png differ
diff --git a/website/docs/modeling/load_data/images/time_axis_customize_build.png b/website/docs/modeling/load_data/images/time_axis_customize_build.png
new file mode 100644
index 0000000000..5b2a7bc375
Binary files /dev/null and b/website/docs/modeling/load_data/images/time_axis_customize_build.png differ
diff --git a/website/docs/modeling/load_data/intro.md b/website/docs/modeling/load_data/intro.md
new file mode 100644
index 0000000000..dad7b871d2
--- /dev/null
+++ b/website/docs/modeling/load_data/intro.md
@@ -0,0 +1,28 @@
+---
+title: Load Data
+language: en
+sidebar_label: Load Data
+pagination_label: Load Data
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - load data
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+
+## Load Data
+
+Load data calculates the source data based on the model and index definition. This chapter takes the sample data as an example to introduce two data load methods and processes:
+
+- full load
+- incremental load by date/time
+
+This chapter also introduces segment operations and settings, which are used to manage Segments.
+
+- Segment operation and settings
+  - Segment Merge
diff --git a/website/docs/modeling/load_data/segment_operation_settings/images/segment.png b/website/docs/modeling/load_data/segment_operation_settings/images/segment.png
new file mode 100644
index 0000000000..8596946053
Binary files /dev/null and b/website/docs/modeling/load_data/segment_operation_settings/images/segment.png differ
diff --git a/website/docs/modeling/load_data/segment_operation_settings/intro.md b/website/docs/modeling/load_data/segment_operation_settings/intro.md
new file mode 100644
index 0000000000..0d4b0ddea5
--- /dev/null
+++ b/website/docs/modeling/load_data/segment_operation_settings/intro.md
@@ -0,0 +1,94 @@
+---
+title: Segment Operation and Settings
+language: en
+sidebar_label: Segment Operation and Settings
+pagination_label: Segment Operation and Settings
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - segment operation and settings
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+Model (index group) consists of one or more segments. Each segment contains a range of data. Segment is created by building index or loading data with a selected data range on the partition columns.
+
+The main contents are as follows:
+
+
+### <span id="view">View Segment</span>
+
+User can access the Segment management interface by following these steps:
+
+1. Open **Data Asset -> Model** page, and click the model (index group) name.
+2. Select **Segment** tag.
+
+The model list page in the AI augmented mode project is shown as below.
+
+![segment](images/segment.png)
+
+Field description of the Segment list:
+
+- Start time: The start time of the data in the Segment. If it is loaded in full **"Full Load"** is displayed.
+
+- End time: The end time of the data in the Segment. If it is loaded in full **"Full Load"** is displayed.
+
+- Index: Indexes in this segment / Total indexes
+
+- Status: Segment status. You can find a detailed introduction [Segment Status](#status) section.
+
+- Last Updated Time: Segment last updated time.
+
+- Source Records: The source records of the data in the segment.
+
+- Storage: The storage size of the data in the segment.
+
+  > Tip: When the tiered storage is turned on, the storage size of the data loaded into the tiered storage (ClickHouse) will be displayed.
+
+- Actions: The operation of the segment. Currently only **Show Detail** is supported.
+
+
+
+### <span id="status">Segment Status</span>
+
+You can view the segment status in the segment list. There are 6 types of segment statuses. See below:
+
+- **ONLINE**: Segment can serve the query by indexes loaded data or pushdown engine.
+- **WARNING**: The data in the segment has been loaded and can serve the query. However, the source data might be changed which might cause the data inconsistent. It's highly recommended to refresh all indexes within this segment.
+- **LOCKED**: Segments that are refreshing or merging will be locked.
+- **LOADING**: The data in the segment is loading.
+- **REFRESHING**: A new segment is automatically generated when you refresh the specified segment. This new segment is marked as *REFRESHING*. When the refresh is complete the old segment will be automatically deleted.
+- **MERGING**: A new segment is automatically generated when you merge the specified segments. This new segment is marked as *MERGING*. When the merge is complete the old segment will be automatically deleted.
+
+
+
+### <span id="expert_operation">Segment Operation</span>
+
+You have 6 types of segment operations on the **Data Asset -> Model** page.
+
+- **+ Segment**: Add segments to define the model’s data range for serving queries. Queries within the range could be answered by indexes or pushdown engine. Queries out of the range would have no results. The button is located above the segment list.
+
+> **Note:** In **Setting -> Basic Settings -> Segment Settings**, enable **Creating Reserved Segments**, then the **+ Segment** operation button will appear.
+
+- **Show Detail**: You can click the icon on the right side of the segment list. When your mouse is hovering over the icon **Show Detail** is displayed. You can view details such as storage size, the data range and more.
+
+- **Refresh**: Refresh the data in the segment. This operation supports batch refresh. The **Refresh** button is located above the segment list.
+
+    >**Note**: Only ONLINE and WARNING status segments can be refreshed.
+
+- **Merge**: Merge multiple segments as a new one. The **Merge** button is located above the segment list.
+  
+    > **Note**: Only ONLINE and WARNING status segments can be merged.
+  
+- **Delete**: Delete the segment. This operation supports batch deletion. The **Delete** button is located above the segment list.
+  
+- **Fix**: Fix the discontinuous segments. This button will be only displayed above the Segment list when the holes exists in Segment ranges.
+
+
+
+### <span id="setting"> Segment Settings</span>
+
+You can configure settings to manage segments automatically in the **Setting -> Segment Settings** page. Please refer to [Project Settings](../../../operations/project-operation/project_settings.md) for the specific requirements.
diff --git a/website/docs/modeling/load_data/segment_operation_settings/segment_merge.md b/website/docs/modeling/load_data/segment_operation_settings/segment_merge.md
new file mode 100644
index 0000000000..1e91167c59
--- /dev/null
+++ b/website/docs/modeling/load_data/segment_operation_settings/segment_merge.md
@@ -0,0 +1,163 @@
+---
+title: Segment Merge
+language: en
+sidebar_label: Segment Merge
+pagination_label: Segment Merge
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - segment merge
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+In the incremental build mode, as the number of segments increases, the system may need to aggregate multiple segments to serve a query, which degrades query performance. At the same time, a large number of small files will put pressure on the HDFS NameNode and affect HDFS performance. Kylin provides a mechanism to control the number of segments - **Segment Merge**.
+
+
+### <span id="manual">Manual Merge</span>
+
+You can merge multiple Segments in the Web GUI or using **Segment Manage API**.
+
+In the web GUI
+1. In the Data Assets -> Model -> Segment list, select the Segments to be merged.
+2. Click "Merge" in the drop-down list, check that three conditions are met (consistent indexes, consistent sub-partition values, and continuous time ranges) , and submit the merge task.
+   The system submits a task of type "Merge Data". Until the task is completed, the original segment is still available. After the task is completed, it will be replaced by a new segment. To save system resources, the original segments will be recycled and cleaned up.
+
+### <span id="auto">Auto Merge</span>
+
+Merging Segments is very simple, but requires manual triggering of the merge from time to time. When there are multiple projects and models in the production environment, it becomes very cumbersome to trigger the merge operation one by one. Therefore, Kylin provides an automatic segment merging solution.
+- [Auto-Merge settings](#setting)
+- [Auto-merge strategy](#strategy)
+- [Choose Segment](#choose)
+- [Try Merge](#trymerge)
+- [Notice](#notice)
+
+#### <span id="setting">Auto-Merge settings</span>
+
+According to different business needs, it supports the automatic merging of project and model settings respectively. If the two merge strategies are different, the system adopts the model-level settings.
+- Project-level: Used for all models in a project, with the same merge strategy.
+- Model-level: used for multiple models in a project, with different automatic merging strategies.
+
+Please refer to **Segment Settings** and **Model/Index Group Rewrite Settings** of [Project Settings](../../../operations/project-operation/project_settings.md) for the specific requirements.
+
+#### <span id="strategy">Auto-merge strategy</span>
+- Merge Timing: The system triggers an automatic merge attempt every time a new segment in the project becomes complete. To ensure query performance, all segments will not be merged at once.
+
+- Time Threshold: Allows the user to set a time threshold of up to 6 layers. The larger the layer, the larger the time threshold. The user can select multiple levels (eg week, month).
+  Note: day, week and month represent natural day, natural week and natural month respectively.
+
+  
+  
+  
+  | level | Time Threshold |
+  | ----- | -------------- |
+  | 1     | hour           |
+  | 2     | day            |
+  | 3     | week           |
+  | 4     | month          |
+  | 5     | quarter        |
+  | 6     | year           |
+
+#### <span id="choose">Choose Segment</span>
+
+When triggering an Auto-Merge, the system attempts to start from the maximum layer's time threshold, skips segments whose time length is greater than or equal to the threshold, and selects the remaining eligible segments (consistent indexes, consistent sub-partition values, and continuous time ranges).
+
+#### <span id="trymerge">Try Merge</span>
+When the total time length of the segments reaches the time threshold, they will be merged. After the merge task is completed, the system will trigger an Auto-Merge attempt again; otherwise, the system repeats the search process using the time threshold for the next level. The attempts stop when none of the selected levels has a segment that meets the condition.
+
+#### <span id="notice">Notice</span>
+- The Auto-Merge of week is constrained by month, that is, if a natural week spans months/quarters/years, they are merged separately. (see example 2).
+- During the process of merging segments, the HDFS storage space may exceed the threshold limit, causing the merging to fail.
+
+
+### <span id="example">Example of Auto Merge</span>
+
+- [Example 1](#ex1)
+- [Example 2](#ex2)
+
+#### <span id="ex1">Example 1</span>
+The switch for Auto-Merge is turned on, and the specified time thresholds are week and month. There are six consecutive Segments A~F.
+
+
+
+| Segment (Initial) | Time Range              | Time Length |
+| ----------------- | ----------------------- | ----------- |
+| A                 | 2022-01-01 ~ 2022-01-31 | 1 month     |
+| B                 | 2022-02-01 ~ 2022-02-06 | 1 week      |
+| C                 | 2022-02-07 ~ 2022-02-13 | 1 week      |
+| D                 | 2022-02-14 ~ 2022-02-20 | 1 week      |
+| E                 | 2022-02-21 ~ 2022-02-25 | 5 days      |
+| F                 | 2022-02-26 Saturday     | 1 day       |
+
+Segment G was added later (Sunday 2022-02-27).
+
+- Now there are 7 segments A~G, the system first tries to merge by month, since Segment A's time length is greater than or equal to the threshold (1 month), it will be excluded. The following segments B-G add up to less than 1 month, do not meet the time threshold (1 month), and therefore cannot be merged by month.
+
+- The system will try the next level of time thresholds (i.e. merged by week). The system rescans all segments, finds that A, B, C, and D are all greater than or equal to the threshold (1 week), so they are skipped. The following segments E-G add up to the threshold (1 week) and merge into Segment X.
+
+- With the addition of segment X, the system will be triggered to restart the merge attempt, but the attempt will be terminated because the conditions for automatic merge have not been met.
+
+  
+
+| Segment(Add G,  Trigger Auto-Merge) | Time Range              | Time Length |
+| ------------------------------------- | ----------------------- | ----------- |
+| A                                     | 2022-01-01 ~ 2022-01-31 | 1 month      |
+| B                                     | 2022-02-01 ~ 2022-02-06 | 1 week        |
+| C                                     | 2022-02-07 ~ 2022-02-13 | 1 week        |
+| D                                     | 2022-02-14 ~ 2022-02-20 | 1 week        |
+| X (Original E-G)                      | 2022-02-21 ~ 2022-02-27 | 1 week        |
+
+Add Segment H  ( 2022-02-28)
+
+- Trigger the system to try to merge by month, all segments except A add up to the threshold (1 month), so B-H are merged into Segment Y.
+
+- With the addition of Segment Y, the system will trigger the merge attempt again, but the conditions for Auto-Merge have not been met, and the attempt is terminated.
+
+  
+
+| Segment(Add H,  Trigger Auto-Merge) | Time Range              | Time Length |
+| ------------------------------------- | ----------------------- | ----------- |
+| A                                     | 2022-01-01 ~ 2022-01-31 | 1 month     |
+| Y (Original B-H)                      | 2022-02-01 ~ 2022-02-28 | 1 month     |
+
+#### <span id="ex2">Example 2</span>
+There are six consecutive segments A~F, and their time lengths are all 1 day. At this time, turn on the "auto merge" switch and specify the time threshold as week.
+
+
+
+| Segment (Initial) | Time Range           |
+| ----------------- | -------------------- |
+| A                 | Monday 2021-12-27    |
+| B                 | Tuesday 2021-12-28   |
+| C                 | Wednesday 2021-12-29 |
+| D                 | Thursday 2021-12-30  |
+| E                 | Friday 2021-12-31    |
+| F                 | Saturday 2022-01-01  |
+
+
+
+Then Segment G was added (Sunday 2022-01-02) with a duration of 1 day.
+
+- At this point there are 7 consecutive Segments, forming a natural week spanning 2 years. The system tries to merge by week, A-E is merged into a new Segment X.
+
+  
+
+| Segment(Add G,  Trigger 1st Auto-Merge) | Time Range                                 |
+| ----------------------------------------- | ------------------------------------------ |
+| X (Original A-E)                          | Monday to Friday (2021-12-27 ~ 2021-12-31) |
+| F                                         | Saturday 2022-01-01                        |
+| G                                         | Sunday 2022-01-02                          |
+
+- With the addition of Segment X, the system will be triggered to merge by week, so F-G will be merged into a new Segment Y.
+
+  
+
+| Segment(Add X,  Trigger 2nd Auto-Merge) | Time Range                                   |
+| ----------------------------------------- | -------------------------------------------- |
+| X (Original A-E)                          | Monday to Friday (2021-12-27 ~ 2021-12-31)   |
+| Y (Original F-G)                          | Saturday to Sunday (2022-01-01 ~ 2022-01-02) |
+
+- With the addition of Segment Y, the attempt to merge by week is triggered again. Because this natural week spans two years, X and Y cannot be merged together, so the attempt stops.
diff --git a/website/docs/modeling/manual_modeling.md b/website/docs/modeling/manual_modeling.md
new file mode 100644
index 0000000000..0a6bb8e9a7
--- /dev/null
+++ b/website/docs/modeling/manual_modeling.md
@@ -0,0 +1,210 @@
+---
+title: Manual modeling
+language: en
+sidebar_label: Manual modeling
+pagination_label: Manual modeling
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - manual modeling
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+Kylin follows multidimensional modeling theory when building star or snowflake models based on your tables. Kylin also leverages pre-computation technique and will reuse the pre-computed results to answer queries, so there is no need to traverse all data when there is a query, and thus achieve sub-second query times on PB-level data.
+
+### **Operation steps** 
+
+Kylin model consists of multiple tables and their join relations. In this article, we use this [SSB dataset](../quickstart/sample_dataset.md), a dataset based on real business applications, and hope to analyze products and supplier information from dimensions such as year, city, supplier name and brand. To achieve this goal, we will create a model in Kylin, and then set dimensions and measures for multidimensional analysis. See the table below for detailed steps. 
+
+| **Step**                                                     | **Description**                                              |
+| ------------------------------------------------------------ | ------------------------------------------------------------ |
+| Step 1: Create a model and add a fact table and dimension tables | In this step, we design the model, and then define the fact table and dimension tables to be analyzed, which will serve as data sources for later data analysis. |
+| Step 2: Create join relations among tables                   | Create join relations between the foreign keys of the fact table and the primary keys of the dimension tables to achieve join queries of the two tables. |
+| Step 3: Add dimensions and measures to the model             | Set the dimensions and measures for data analysis. Kylin will run pre-computation based on the combination of the defined dimensions and measures, which will greatly accelerate data query. |
+| Step 4: Save the model and set the loading method            | Save the model settings and specify the data loading method for the pre-computations step. If incremental load is selected, data within a specified time range will be loaded to improve loading efficiency. |
+
+### **Step 1: Create a model and add a fact table and dimension tables** 
+
+1. Log in to Kylin as any of the following roles:
+
+   - System admin 
+   - **Management** or **Admin** of the target project
+
+2. To create a model: 
+
+   1. In the left navigation panel, click **Data Asset** > **Model**.
+
+   2. Click **+ Model**. 
+
+   3. In the pop-up dialog box, enter a name and description for the model, and then click **Submit**.
+
+      Model name can be any combination of numbers, letters, and underscores (_). 
+
+3. You will be directed to the model editing page. On this page, add the fact table to the model. 
+
+   Fact table is used to store fact records, that is, to store data about a business process at the finest granularity, for example, product sales table. It often serves as the primary table of a model.
+
+   1. In the left-hand **Data Source** section, find the target fact table (**P_LINEORDER** in this example).  
+
+      > [!NOTE]
+      >
+      > If there is no table in the **Data Source** section, please [load data source](../datasource/intro.md) first.
+
+   2. Drag the target table to the right-hand canvas and select **Switch to Fact Table**.
+
+      ![](images/switch_to_fact_table.png)
+
+4. Add dimension tables to the model. 
+
+   Dimension table, also called lookup table, is used to store repeated attributes of the fact table, such as date and geographic location. Dimension tables can help to reduce the fact table size and improve dimension management efficiency. 
+
+   1. In the left-hand **Data Source** section, find the target dimension table. 
+
+   2. Drag the target table to the right-hand canvas. 
+
+      To add multiple dimension tables, repeat this step for each table. As shown below, one fact table and 4 dimension tables are added.
+
+      ![](images/add_tables.png)
+
+### **Step 2: Create join relations among tables** 
+
+1. On the model editing page, drag a column to create a join relation between the foreign key of the fact table and the primary key of the dimension table. 
+
+   ![](images/create_join_relations.png)
+
+2. In the **Add Join Relationship** dialog box, follow the instructions below to set the join relation. 
+
+   ![](images/add_join_relations.png)
+
+   - **Join Relationship for Tables**: It includes 3 drop-down lists. The first and the third one specify the tables to be joined, and the second one defines the join relation. Kylin currently supports **LEFT** (left join) and **INNER** (inner join). 
+
+   - **Table Relationship:** Select the mapping between the foreign and primary keys: **One-to-One or Many-to-One**, or **One-to-Many or Many-to-Many**.  
+
+   - **Precompute Join Relationship**: Select whether to expand the joined tables into a flat table based on the mappings. This option is selected by default. For more information about this function and its applicable scenarios, see [Precompute the join relations](precompute_join_relations.md). 
+
+   - **Join Relationship for Columns**: It includes 3 drop-down lists. The first and the third one specify the columns to be joined, and the second one defines the join relation, which is equal-join (=) by default. Join relations should meet the following requirements:  
+     - Do not define more than one join relation for the same column; two tables could only be joined by the same condition for one time
+     - Join relations for columns should include at least one equal-join condition (=)
+     - Join relations ≥ and < must be used in pairs, and the column in between must be the same. Example: B ≥ A, C < A
+
+3. Click **OK**.
+
+To create join relations for multiple tables, repeat steps 1-3 for each table. In this example, we create 4 join relations for tables, which constitute a star model. 
+
+![](images/star_model_created.png)
+
+Corresponding SQL statements:
+
+```SQL
+P_LINEORDER LEFT JOIN DATES ON P_LINEORDER.LO_ORDERDATE = DATES.D_DATEKEY
+P_LINEORDER LEFT JOIN CUSTOMER ON P_LINEORDER.LO_CUSTKEY = CUSTOMER.C_CUSTKEY
+P_LINEORDER LEFT JOIN SUPPLIER ON P_LINEORDER.LO_SUPPKEY = SUPPLIER.S_SUPPKEY
+P_LINEORDER LEFT JOIN PART ON P_LINEORDER.LO_PARTKEY = PART.P_PARTKEY
+```
+
+### **Step 3: Add dimensions and measures to the model**
+
+1. To add dimensions to the model: 
+
+   1. On the model editing page, drag dimension columns from dimension tables to the **Dimension** section. 
+
+       To add dimensions in batch, click **+** in the **Dimension** section.
+
+       ![](images/add_dimention.gif)
+
+   2. In the pop-up dialog box, set the dimension name.
+
+       By default, it's the column name. It can be any combination of letters, numbers, spaces, and special characters `(_ -()%?)`. 
+
+   3. Click **OK**. 
+
+       In our example, we added year (D_YEAR in DATE), the city customer is in (CITY in CUSTOMER), supplier name (S_NAME in SUPPLIER), and brand (P_BRAND in PART) as dimensions.    
+
+2. Add measures to the model.
+
+   1. On the model editing page, drag the target columns to the **Measure** section. 
+
+       To add measures in batch, click **+** in the **Measure** section.
+
+      ![](images/add_measure.png)
+
+   2. In the **Add Measure** dialog box, follow the instructions below to complete join relation settings.
+
+      - **Name**: Column name by default. It can be any combination of letters, numbers, spaces, and special characters `(_ -()%?)`. 
+
+      - **Function**: **SUM (column)** by default. Kylin has a variety of built-in basic and advanced functions, such as Count Distinct, TopN, etc. For more information, see [Advanced measures](model_design/advance_guide/intro.md).  
+
+      - **Column**: The measure column. No adjustment is needed. 
+
+      - **Note** (Optional): Enter notes to facilitate future measure management.
+
+   3. Click **Submit**. 
+
+      In our example, we added revenue (LO_REVENUE in P_LINEORDER) and supply cost (LO_SUPPLYCOST in P_LINEORDER) as measures, and wanted to calculate the sum for each.  
+
+3. (Optional) To achieve complex processing and computation based on the existing columns, you can add computed columns to the model. For more information, see [Computed columns](model_design/computed_column/intro.md).
+
+### Step 4: Save the model and set the loading method
+
+1. In the bottom right corner of the model editing page, click **Save.**
+
+2. In the **Save** dialog box, follow the instructions below to complete model settings.
+
+   ![](images/save_load_method.png)
+
+   - **Please select a load method**:
+     - **Full Load**: Load and pre-compute all data in the source table according to different combinations of dimensions and measures.
+     - **Incremental Load**: Load and pre-compute data within the specified time range in the source table according to combinations of dimensions and measures. You also need to specify the following parameters if this option is selected. 
+       - **Partition Table**: Fact table (default and cannot be changed) 
+       - **Time Partition Column**: Select a column of the time type in the partition table. 
+       - **Time Format**: Select the time format. Or you can click ![](images/time_format.png) and Kylin will automatically fill in the time format.
+   - **Advanced Setting**: Use the data filter to filter out null values or data meeting certain requirements. Use `AND` or `OR` to associate multiple filters, for example, `BUYER_ID <> 0001 AND COUNT_ITEM > 1000 OR TOTAL_PRICE = 1000`. 
+   - **Add Base Indexes**: Add the following base indexes. This option is enabled by default so base indexes will automatically update when model dimensions and measures change. 
+     - Base aggregate index: It includes all model dimensions and measures. 
+     - Base table index: It includes all columns of model dimensions and measures.
+
+3. Click **Submit**. 
+
+   After the model is saved, you can click **View Index** in the **Notice** dialog box to check the aggregate index and table index that Kylin automatically creates. 
+
+### Next steps
+
+For the newly created base indexes, you need to [build them](load_data/build_index.md) so Kylin can run pre-computation based on these indexes to accelerate queries. 
+
+> [!NOTE]
+>
+> There are few scenarios where base indexes can be used to accelerate queries. To improve query efficiency, you need to add more indexes to the model. For more information, see [Aggregate index](model_design/aggregation_group.md) and [Table index](model_design/table_index.md).
+
+### FAQ
+
+- Question: Why did I get an error when saving the time partition column settings?
+
+  Answer: This error occurs when the time format of the time partition column does not match the target format. Kylin supports the following time formats:  `yyyyMMdd`, `yyyy-MM-dd`, `yyyy/MM/dd`, `yyyy-MM-dd HH:mm:ss`, `yyyy-MM-dd HH:mm:ss.SSS`, `yyyy-MM`, `yyyyMM` and `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`.
+
+  Kylin also supports customized time formats if the following conditions are met:
+
+  - Any combination of yyyy, MM, dd, HH, mm, ss, and SSS is used with the elements in ascending order. 
+  - Hyphens (-), forward slashes (/), colons (:), or spaces are used as separators. 
+  - Unformatted letters are enclosed in single quotation marks ('). For example, 'T' is recognized as T.
+
+  > [!NOTE] 
+  >
+  > When the time format is customized as `yyyyMMddHHmmss`, the corresponding column in the Hive table should be strings, or Kylin may fail to recognize column data.
+  
+- Question: I've modified several tables from the same model. Why did I get an error when reloading these tables? 
+
+  Answer: Since Kylin currently only supports loading a single table at a time, please edit and reload tables one by one, rather than reload several modified tables at a time.  
+  
+- Question: What are the rules for the model to go online?
+
+  Answer: It will automatically switch to online when the building job completed. However, customer may need to continually build some historical data during a period when creating a new report or testing, and may not want this model to serve any query unless it has built all data. In this case, we also offer a model level configuration to ensure users can control the model status manually . After configuring the `kylin.model.offline` to `true` (default value is false) in model rewriting [...]
+  
+  ![](images/add_model_set.png)
diff --git a/website/docs/modeling/model_concepts_operations.md b/website/docs/modeling/model_concepts_operations.md
new file mode 100644
index 0000000000..dc7bed313c
--- /dev/null
+++ b/website/docs/modeling/model_concepts_operations.md
@@ -0,0 +1,137 @@
+---
+title: Model Concepts and Operations
+language: en
+sidebar_label: Model Concepts and Operations
+pagination_label: Model Concepts and Operations
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - model concepts
+    - model operations
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+Model design refers to building a star model or snowflake model based on data tables and multidimensional modeling theory. The main contents of model design are as follows:
+
+- Define Fact Table and Dimension Table
+- Define the Association Relationship between Tables
+- Define Dimensions and Measures
+
+### <span id="model">Model List</span>
+
+ You can create and design models manually. Below are the main contents of the model list:
+
+1. Log in to Web UI, switch to a project.
+
+2. Navigate to **Data Asset -> Model** page, where models are shown in a list. The picture below is the index group list in AI augmented mode:
+
+	![Model List](images/model_list.png)
+	
+	**Fields Explanation:**
+	
+	- **Model Name**: Model's name.
+	
+	  - **Status**: There are four statuses.
+      - *ONLINE* indicates this model is online and able to answer queries.
+	    - *OFFLINE* indicates this model is offline and not available to answer queries. We recommend taking a model offline when you need to edit it. 
+	    - *BROKEN* indicates this model is broken and not available. This mostly happens when the schemas of related source tables have changed, for instance, when a related source table is deleted.
+	    - *WARNING* indicates this model is in a warning state and can only serve part of the queries. This mostly happens when there are holes in the segments or when indexes are waiting to be built.
+	  - **Last Updated Time**: The latest time the model was updated.
+	
+	- **More Actions**: The **More Actions** button will appear when you are hovering on model name area, please refer to [Model Operations](#operation) for details.
+	
+	- **Owner**: The user who created this model.
+	
+	- **Description**: Model description.
+
+	- **Fact Table**: The fact table of this model.
+	
+	- **Types**: Model types, which include *Batch Model*, *Streaming Model*, *Fusion Model*
+	
+	- **Usage**: Hit count by SQL statements in the last 30 days. Update every 30 minutes.
+
+	- **Rows**:  The rows of loaded data in this model.
+	
+	- **Storage**: The storage size of loaded data in this model, which combines the storage size of all Segments data.
+	
+	  > Tip: When the tiered storage is turned on, the total storage size of the data loaded into the tiered storage (ClickHouse) will be displayed.
+	
+	- **Expansion Rate**: The ratio of the storage size of the built data to the storage size of the corresponding source table data under the model. Expansion Rate = Storage Size / Source Table Size.
+	      
+	  
+	> Notice: The expansion rate won't show if the storage size is less than 1GB.
+	
+	- **Index Amount**: The amount of indexes in this model.
+
+### <span id="operation">Model Operation</span>
+
+You are only allowed to operate on models in **AI augmented mode**. You can hover on the rightmost column **Actions** of the model list to get the action names. Specific actions are listed below:
+
+- **Edit**: Click on the pencil shape button, enter into the model editing page.
+
+- **Build Index**: Loads data for models. You can choose the data range in the pop-up window.
+
+- **Model Partition**: Set partition column for the model.
+
+- **Export Model**: Export single model metadata.
+
+  > **Note**: Since the locked indexes will be deleted after the new indexes have been built, the exported model metadata will not include the locked index.
+
+- **Export TDS**: Export the TDS file of the model.
+
+- **Rename**: Renames the model.
+
+- **Clone**: Clones an identical model. You can give a new name for this new model. The new model has the same fact table, dimension tables, join relationship, dimensions, measures, computed columns, date partition column, aggregate indexes, table indexes, etc. as the origin model. But the new model does not have data, you need to load data for this cloned model manually.
+
+  > **Note**: Since the locked indexes will be deleted after the new indexes have been built, the cloned model will not include the locked index.
+
+- **Change Owner**: Change the model owner. Only system administrators and project administrators have the authority to modify the model owner.
+
+- **Delete**: Deletes the model, remove the loaded data at the same time.
+
+- **Purge**: Purges all loaded data in this model.
+
+- **Offline**: Makes an *Online / Warning* model offline. An offline model cannot answer any queries.
+
+- **Online**: Makes an *Offline* model online. An online model should be able to answer related queries.
+
+> **Note:** If the model is in *BROKEN* status, only the delete operation is allowed.
+
+
+### <span id="more">Model Details</span>
+
+Models contain Segments and indexes. You can click model name to unfold the detailed information, as shown below:
+
+![Details](images/modellist_more_info.png)
+
+- **Overview**: Check Overview details, please refer to [Model Overview](#overview) for more.
+- **Data Features**: Check data features.
+- **Segment**: Check Segment details, please refer to [Segment Operation and Settings](load_data/segment_operation_settings.md) for more.
+- **Index**: Review the model indexes.
+  - **Index Overview**: Check index overview.
+  - **Aggregate Group**: Add or check defined aggregate indexes, please refer to [Aggregate Index](model_design/aggregation_group.md) for more details.
+  - **Table Index**: Add or check defined table indexes, please refer to [Table Index](model_design/table_index.md) for more details.
+- **Developers**: Check information for developers.
+  - **JSON**: Kylin describes the information of models (index groups) in `JSON` format, such as design, dimensions, measures, etc.
+  - **SQL**: The SQL statement consists of related information about tables and columns in the model, such as the join conditions between the tables.
+
+### <span id="overview">Model Overview</span>
+
+After expanding the model information, you can see the model overview page, which will help you to quickly get the model information.
+
+![Model Overview](images/model_overview/unfold_model.png)
+
+On this page, you can view the ER diagram of the model.
+
+![View ER Diagram](images/model_overview/er.png)
+
+What's more, you can view the dimensions and measures information contained in the model.
+
+![View Dimensions Information](images/model_overview/dimensions.png)
+
+![View Measures Information](images/model_overview/measures.png)
diff --git a/website/docs/modeling/model_design/advance_guide/fast_bitmap.md b/website/docs/modeling/model_design/advance_guide/fast_bitmap.md
new file mode 100644
index 0000000000..19576ec9a8
--- /dev/null
+++ b/website/docs/modeling/model_design/advance_guide/fast_bitmap.md
@@ -0,0 +1,73 @@
+---
+title: Query Optimization for Exact Hit Index
+language: en
+sidebar_label: Query Optimization for Exact Hit Index
+pagination_label: Query Optimization for Exact Hit Index
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - query optimization for exact hit index
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+Since Kylin 5, the system has enhanced the optimization of queries that hit an index exactly (the query contains dimensions that are exactly the same as the dimensions of the selected index), which also improves performance in count distinct scenarios.
+
+With the following settings, optimization of precise count distinct queries can be applied:
+
+1. Build a model that contains precise count distinct measure.
+2. Modify the configuration in model level and add custom settings:
+   `kylin.query.fast-bitmap-enabled = true`
+3. Build the model
+4. Query the SQL statements with exact indexes
+
+### Configuration Level
+
+This configuration is only available at the model level.
+
+### Query Example
+
+Taking Kylin's sample data set TPC-H as an example, the fact table LINEITEM simulates the recording of transaction data. The following query gets the number of orders under different sales dates.
+
+```sql
+SELECT  COUNT(distinct LINEITEM.L_ORDERKEY),
+        LINEITEM.L_SHIPDATE
+FROM TPCH_FLAT_ORC_50.LINEITEM
+JOIN TPCH_FLAT_ORC_50.ORDERS
+ON TPCH_FLAT_ORC_50.LINEITEM.L_ORDERKEY = TPCH_FLAT_ORC_50.ORDERS.O_ORDERKEY
+GROUP BY  LINEITEM.L_SHIPDATE
+```
+
+
+ 1. Create the model:
+    ![Create Model](images/model.png)
+
+ 2. Switch to the **Model Settings** interface:
+    <img src="images/model_config_1.png" alt="Settings" style="zoom:50%;" />
+
+ 3. Enter the configuration to enable the function:
+    <img src="images/model_config_2.png" alt="Settings" style="zoom:50%;" />
+
+ 4. Add indexes:
+    ![Add Index](images/add_index.png)
+
+ 5. After building successfully, the query performance is improved a lot when the query exactly matches index.
+
+   ![Query before optimization](images/query_old.png)
+
+   ![Query after optimization](images/query_new.png)
+
+6. Compare the execution plans before and after optimization
+   
+   ![Queries before optimization](images/spark_plan_old.png)
+
+   ![Optimized query](images/spark_plan_new.png)
+
+
+### Known Limitations
+
+1. This operation will lead to a longer build time and almost double storage cost.
+2. The indexes need to be refreshed when enabling this function.
diff --git a/website/docs/modeling/model_design/advance_guide/images/add_index.png b/website/docs/modeling/model_design/advance_guide/images/add_index.png
new file mode 100644
index 0000000000..c8d4134ac2
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/add_index.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/low_usage.png b/website/docs/modeling/model_design/advance_guide/images/low_usage.png
new file mode 100644
index 0000000000..9b26333390
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/low_usage.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/model.png b/website/docs/modeling/model_design/advance_guide/images/model.png
new file mode 100644
index 0000000000..f2b48e2b5d
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/model.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/model_check.png b/website/docs/modeling/model_design/advance_guide/images/model_check.png
new file mode 100644
index 0000000000..b711079f1c
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/model_check.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/model_config_1.png b/website/docs/modeling/model_design/advance_guide/images/model_config_1.png
new file mode 100644
index 0000000000..c1013a7957
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/model_config_1.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/model_config_2.png b/website/docs/modeling/model_design/advance_guide/images/model_config_2.png
new file mode 100644
index 0000000000..451b380718
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/model_config_2.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/model_export.png b/website/docs/modeling/model_design/advance_guide/images/model_export.png
new file mode 100644
index 0000000000..98be4465ed
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/model_export.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/model_publish.png b/website/docs/modeling/model_design/advance_guide/images/model_publish.png
new file mode 100644
index 0000000000..b5b6783b5d
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/model_publish.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/model_upload.png b/website/docs/modeling/model_design/advance_guide/images/model_upload.png
new file mode 100644
index 0000000000..1db29a6367
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/model_upload.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_first.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_first.png
new file mode 100644
index 0000000000..aaeccf177a
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_first.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second.png
new file mode 100644
index 0000000000..24fd1fe8f0
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second_0.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second_0.png
new file mode 100644
index 0000000000..909840c351
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second_0.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second_1.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second_1.png
new file mode 100644
index 0000000000..87f5c7fbec
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partioning_build_subp_second_1.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_add_subp_value.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_add_subp_value.png
new file mode 100644
index 0000000000..d77a568481
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_add_subp_value.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_close.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_close.png
new file mode 100644
index 0000000000..a51b8a2cd4
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_close.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_model_save.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_model_save.png
new file mode 100644
index 0000000000..bc1b2b43d8
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_model_save.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_model_set.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_model_set.png
new file mode 100644
index 0000000000..7474181686
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_model_set.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_query.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_query.png
new file mode 100644
index 0000000000..4b123f9f8c
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_query.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_set.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_set.png
new file mode 100644
index 0000000000..d0abb0cbff
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_set.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_subp_value.png b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_subp_value.png
new file mode 100644
index 0000000000..ba99c09b81
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/multilevel_partitioning_subp_value.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/query_new.png b/website/docs/modeling/model_design/advance_guide/images/query_new.png
new file mode 100644
index 0000000000..fc214c74d3
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/query_new.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/query_old.png b/website/docs/modeling/model_design/advance_guide/images/query_old.png
new file mode 100644
index 0000000000..0e7db49a2d
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/query_old.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/spark_plan_new.png b/website/docs/modeling/model_design/advance_guide/images/spark_plan_new.png
new file mode 100644
index 0000000000..034dcaf2e0
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/spark_plan_new.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/images/spark_plan_old.png b/website/docs/modeling/model_design/advance_guide/images/spark_plan_old.png
new file mode 100644
index 0000000000..689b9ac1f0
Binary files /dev/null and b/website/docs/modeling/model_design/advance_guide/images/spark_plan_old.png differ
diff --git a/website/docs/modeling/model_design/advance_guide/integer_encoding.md b/website/docs/modeling/model_design/advance_guide/integer_encoding.md
new file mode 100644
index 0000000000..9f783e6f28
--- /dev/null
+++ b/website/docs/modeling/model_design/advance_guide/integer_encoding.md
@@ -0,0 +1,36 @@
+---
+title: Skip Dictionary Encoding Optimization for Integer Type 
+language: en
+sidebar_label: Skip Dictionary Encoding Optimization for Integer Type 
+pagination_label: Skip Dictionary Encoding Optimization for Integer Type 
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - skip dictionary encoding optimization for integer type
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+Starting from Kylin 5, the system supports skipping dictionary encoding for integer types.
+
+With the following settings, optimization of precise count distinct (deduplication) queries can be enabled:
+
+1. Build a model that contains precise deduplication metrics
+
+2. Modify the configuration of the model and add custom settings to the model in the settings interface:
+    `kylin.query.skip-encode-integer-enabled = true`
+
+3. Build the model
+
+### Configuration scope
+
+This configuration is only available at the model level
+
+### Precautions
+
+1. This operation can improve build performance; however, if the data is highly dispersed, it may cause the expansion rate to be too high.
+2. If the value of this parameter changes, you need to rebuild the entire model.
+
diff --git a/website/docs/modeling/model_design/advance_guide/intro.md b/website/docs/modeling/model_design/advance_guide/intro.md
new file mode 100644
index 0000000000..a3db8e4a75
--- /dev/null
+++ b/website/docs/modeling/model_design/advance_guide/intro.md
@@ -0,0 +1,17 @@
+---
+title: Model Advanced Settings
+language: en
+sidebar_label: Model Advanced Settings
+pagination_label: Model Advanced Settings
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - model advanced settings
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+This section describes the advanced settings applied to the model.
diff --git a/website/docs/modeling/model_design/advance_guide/model_metadata_managment.md b/website/docs/modeling/model_design/advance_guide/model_metadata_managment.md
new file mode 100644
index 0000000000..7d76bd4ccf
--- /dev/null
+++ b/website/docs/modeling/model_design/advance_guide/model_metadata_managment.md
@@ -0,0 +1,133 @@
+---
+title: Model Metadata Management
+language: en
+sidebar_label: Model Metadata Management
+pagination_label: Model Metadata Management
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - model metadata management
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+## Model Metadata Management
+
+Kylin is a stateless service. All state information is stored in metadata. Models are the core assets of a Kylin cluster. The model metadata describes the model information in detail.
+
+The movement of models in different environments is an important process of actual production. Therefore, importing and exporting metadata is a crucial link in operation and maintenance. Kylin provides import and export model metadata functions.
+
+
+### Model Publishing {#model_publish}
+
+In many companies, in order to ensure the stability of the production environment, the release and change of models in the production environment is very strict. Users often need to deploy an additional independent development environment for data development and test verification (and possibly there is still a test environment), and migrating the data model from the development environment to the production environment is the process of model release. At the same time, this process need [...]
+
+![](images/model_publish.png)
+
+
+### Model Metadata Export {#model_metadata_export}
+
+#### Export contents
+
+With model metadata export, you can export single or multiple models as a compressed package in zip format.
+
+The exported model metadata range:
+
+- Include: model definition information, such as the tables referenced by the model, table relationships, partition columns, calculable columns, filter conditions, measures, dimensions, aggregation groups, index content, etc.
+
+- Not include: Segment information, building data, index status, etc.
+
+- You can choose whether to include recommendations, model rewrite settings and sub-partition values in multi-level partitioning models. After selecting export, if the target system has a model with the same name, the recommendations, model rewrite settings and sub-partition values of the target model will be directly overwritten during import.
+
+Notes:
+
+- To ensure the integrity of the file, please do not unzip the file or modify the content of the file. The second half of the file name is the file integrity check code. If you need to modify the file name to increase recognition, please keep the check code unchanged.
+
+
+#### Export operation
+
+- Export a single model
+
+   Click **Data Asset->Model** in the left navigation bar to enter the **Model List** page. Through **...(More Actions) - Export Model** of a single model, the specified model can be exported in the format of a zip compressed package.
+
+- Export multiple models
+
+   - You can click the **Export Model** button on the **Model List Page**, select multiple models and export.
+   - Or click the **Admin** button on the right of the status bar at the top of the page, in the project list page, in a single project **Actions - More Actions - Export Model**, select multiple models and export.
+
+![](images/model_export.png)
+
+
+### Model Metadata Import {#model_metadata_import}
+
+#### Import operation
+
+- On the **Model List** page, click the drop-down button behind **+Model**, select **Import Model**, and upload the model metadata compression package.
+
+- Or enter the **Admin** page, on the project list page, select the project to import the model metadata, in **Action - ...(More Actions) - Import Model**, upload the model metadata compression package.
+
+  ![](images/model_upload.png)
+  
+#### Select operation type
+
+When parsing the metadata package, the system will use the **model name** as the unique identifier to distinguish models, and match the models in the target system with the model metadata in the metadata package. After the models are parsed by the system, there will be three operations to choose from: **Replace**, **Add New**, and **Not Import**. The following sections introduce in detail when each of these three operations appears by default and whether it can be switched to another operation:
+
+- **Not import**
+
+  - Operation instructions: The system cannot import the model, or the user actively chooses not to continue importing the model.
+
+  - The condition that appears by default: **cannot find a table or column in the target system data source in the model to be imported, or the data type of the column is inconsistent**.
+
+  - Whether it can be switched to other operations: No.
+
+- **Replace**
+
+  - Operation instructions: A model with the same name already exists in the target system, and **the model has no major changes**, the system recommends using the model in the metadata package to replace the model with the same name in the target system.
+
+    The criteria for no major changes in the model are:
+
+    - The fact table and the dimension table are exactly the same.
+    - The table relationship is completely consistent, including table connection conditions, table relationships, and column join conditions.
+    - The partition column and format are exactly the same, including the model loading method (full and incremental), excluding multi-level partition sub-partition value differences.
+    - The data filter conditions are completely consistent.
+
+  - Condition of appearance by default: A model with the same name already exists in the target system, and there is no major change in the model.
+
+  - Whether it can be switched to other operations: it can be switched to 'add new' or 'not import'. When manually switching to 'add new', please also change the model name to a name that does not exist in the target system to avoid model name conflicts.
+
+  - **Note: Replacing may result in the deletion of part of the built data. Please backup and double check before importing.**
+
+- **Add New**
+  
+  - Operation instructions: There is no model with the same name in the target system, and the model to be imported has the **table and column in the target system data source, and the column data type is consistent**. Or there is a model with the same name in the target system, and the model has major changes. For major changes in the model, refer to the above description of the replace operation.
+  - Conditions that appear by default: the same as the two conditions in the operating instructions.
+  - Whether it can be switched to other operations: it can be switched to not import.
+
+When a model to be imported is selected, the differences between the model to be imported, the model with the same name, and the data source in the current project will be displayed on the right side. Differences are divided into four categories: **Not found**, **Add**, **Delete**, and **Change**.
+
+After the model is imported, it may be necessary to build the newly added index before it can serve queries.
+
+![](images/model_check.png)
+
+### Some Practice {#model_io_practice}
+
+#### How to deal with model rewrite settings
+
+Different environments may have different requirements for model rewrite settings. When you export a model, you can choose whether to export the model rewrite settings at the same time. Later, when you import the model, the settings of the model with the same name in the target system will be overwritten.
+
+#### How to deal with sub-partition values of multi-level partition models
+
+Different environments generally have different multi-level partition sub-partition values. For models that use multi-level partitioning, you can choose whether to export the multi-level sub-partition values at the same time when exporting the model. When importing the model, the multi-level partition sub-partition values in the target system will be overwritten, and the built data corresponding to the overwritten sub-partition values will be deleted during import.
+
+#### Model metadata export and import related API
+
+You can also export and import model metadata through API. For details, please refer to: [Model Import and Export API](../../../restapi/model_api/model_import_and_export_api.md)
+
+
+### Known limitation {#know_limitation}
+- **After the model is imported, it cannot be undone. Please make a backup of the model in advance**.
+- Export and import are only supported between versions that share the same first two version numbers. For example, a model metadata package exported by Kylin 4.2.x cannot be imported into Kylin 4.3.x.
diff --git a/website/docs/modeling/model_design/advance_guide/multilevel_partitioning.md b/website/docs/modeling/model_design/advance_guide/multilevel_partitioning.md
new file mode 100644
index 0000000000..cd252e3ff3
--- /dev/null
+++ b/website/docs/modeling/model_design/advance_guide/multilevel_partitioning.md
@@ -0,0 +1,160 @@
+---
+title: Multi-Level Partitioning (Beta)
+language: en
+sidebar_label: Multi-Level Partitioning (Beta)
+pagination_label: Multi-Level Partitioning (Beta)
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - multi-level partitioning (beta)
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+### <span id="what">What is a multi-level partition</span>
+
+Kylin multi-level partitioning means that in addition to partition management based on **time partition column**, the model can also be partitioned based on **sub-partition**.
+
+In some analysis scenarios, in addition to partition management based on date or time, it is also necessary to perform partition management based on other dimensions, such as region, branch, etc. We call this sub-partition. For example, for some users who conduct business across regions, the time for data preparation varies in different regions due to differences of business end time. Users can set a region as a sub-region. When the data in a certain region is completed, the data in the  [...]
+
+Kylin multi-level partitioning currently only supports two-level partitioning.
+
+
+
+### <span id="open">How to enable the multi-level partition function</span>
+
+You can enable or disable support for multi-level partitioning in the model **Settings-Advanced Settings-Multi-level Partitioning**.
+
+![Set Multilevel Partitioning](images/multilevel_partitioning_set.png)
+
+**Note:** When multi-level partitioning is disabled for the model, models using multi-level partitioning will automatically go offline. To bring such a model online again, you must either delete its sub-partitions first or re-enable this option.
+
+![Turn off Multilevel Partitioning](images/multilevel_partitioning_close.png)
+
+
+### <span id="set">Set model sub-partition</span>
+
+After enabling multi-level partitioning, you can create a model in **Data Asset- Model- +Model**. Then you can choose to add sub-partition columns when saving the model, and the types currently supported as sub-partition columns are **tinyint,smallint,int/integer,bigint,double,decimal,timestamp,date,varchar,char,boolean**.
+
+<img src="./images/multilevel_partitioning_model_save.en.png" width="40%"/>
+
+Or you can adjust the sub-partition column in **Model List-...(More Actions)-Model Partition**.
+
+
+
+### <span id="sub_partition">Manage model sub-partition values</span>
+
+You can also add, delete or search for sub-partition values in **Model List-...(More Actions)-Manage Sub-Partition Values**.
+
+When adding sub-partition values, the system does not check their correctness and allows adding sub-partition values that do not yet exist. When querying, the queried value must exactly match a sub-partition value (case sensitive; wildcard matching is not supported). Please ensure that the added sub-partition values meet your expectations.
+
+![](./images/multilevel_partitioning_subp_value.png)
+
+
+
+### <span id="build">Build sub-partition</span>
+
+When constructing a Segment of a new time range, you can click **Model List-Build Index** and specify the sub-partition values during construction.
+
+![](images/multilevel_partioning_build_subp_first.png)
+
+When the Segment already exists but only some of its sub-partitions have been built, you can click the model name to enter the model information page. Click **Segment** to view the built sub-partitions under **Subpartition**, or continue to build the sub-partitions that have not been built yet.
+
+![](images/multilevel_partioning_build_subp_second_0.png)
+
+![](images/multilevel_partioning_build_subp_second_1.png)
+
+![](images/multilevel_partioning_build_subp_second.png)
+
+
+If you need to merge Segments, you need to ensure that the sub-partition values are consistent.
+
+The sub-partition has three states, namely:
+
+- **ONLINE**: indicates that the construction has been completed and can serve the query
+- **LOADING**: indicates that it is under construction
+- **REFRESHING**: indicates that it is being refreshed, and it can still serve the query during refreshing
+
+
+
+### <span id="query">Query behavior under multi-level partition</span>
+
+When Kylin system answers queries, there are mainly the following rules:
+
+- Segment time range defines the time range that the model can answer. When the query specifies an undefined time range, this part of the data returns empty.
+- Segment sub-partition defines the range of sub-partition values that the model can answer. If the query specifies a sub-partition value that is not defined by the model, this part of the data returns empty. If the query specifies a sub-partition that is not built under the included time range, query pushdown will be used.
+- If the indexes within a Segment's time range and sub-partition value range can all satisfy the query, they will be used first; otherwise the query will be answered by query pushdown (provided that pushdown is enabled).
+
+
+
+The following common cases help understand. Suppose there are 4 Segments in the model, and the project has been opened for query pushdown
+
+- Segment 1, the time range is [2015-2016), the sub-partitions constructed are Partition 1, Partition 2, including indexes Index A and Index B
+- Segment 2, the time range is [2016-2017), and the sub-partitions constructed are Partition 1, Partition 2, Partition 3, including indexes Index A and Index C
+- Segment 3, the time range is [2017-2018), and the sub-partitions constructed are Partition 1, Partition 2, Partition 4, including indexes Index A ,Index B and Index C
+- Segment 4, the time range is [2018-2019), does not exist in the system, for ease of understanding, the code is Segment 4
+- Segment 5, the time range is [2019-2020), reserved Segment, does not include subpartitions and indexes
+
+![](images/multilevel_partitioning_query.png)
+
+When there are the following modes of query, the system will answer the query in this way:
+
+**Case 1: Query without any time partition conditions**
+
+The system will answer the query results of the total time range of all Segments, in this example the time range of Segment 1, Segment 2, Segment 3, Segment 5
+
+**Case 2: The query specified a specific time partition [2015-2016), but did not specify any model sub-partition value**
+
+The system will judge whether Index A and Index B can answer the query, if they can be answered, the index will answer, otherwise the query will be answered by the pushdown query engine
+
+**Case3: The query specifies a specific time partition [2015-2017), and specifies that the model sub-partition value is  Partition 1, Partition 2**
+
+The system will judge whether Index A, Index B, Index C can answer the query, if it can be answered, the index will answer, otherwise the query will be answered by the pushdown query engine
+
+**Case4: The query specifies a specific time partition [2015-2016), and specifies that the model sub-partition value is equal to Partition 3**
+
+The query specifies Partition 3 that is not built under Segment 1, and the system will need to answer through the pushdown query engine
+
+**Case5: The query specifies a specific time partition [2015-2018), and the model sub-partition value is equal to Partition 5**
+
+**Partition 5** is not defined in the model, the system will return No Data
+
+**Case6: The query specifies a specific time partition [2015-2019), and the model sub-partition value is equal to Partition 1**
+
+At this time, the query contains an undefined time range [2018-2019), so this part of the data returns empty. For the range [2015-2018) within Partition 1, the system will judge whether Index A, Index B, and Index C can answer the query; if they can, the indexes will answer it, otherwise the query pushdown engine will answer it.
+
+**Case7: The query specifies a specific time partition [2015-2020), and the model sub-partition value is equal to Partition 1**
+
+The query contains a segment that has not built any index data. It can be judged that the index must not fully meet the time range included in the query, and the system will answer through the  pushdown query engine.
+
+
+
+### <span id="limitation">Known limitation</span>
+
+- Kylin multi-level partitioning currently only supports two-level partitioning.
+- If you need to merge Segments, you need to ensure that the sub-partition values are consistent.
+- Please control the number of sub-partition values within 2000. If the number of partition values or the average length of which is too large, when submitting the building job or making other operations, the packet size limit during metastore communication may be exceeded and an error will be reported. For more detailed information, please refer to the FAQ below.
+
+### <span id="faq">FAQ</span>
+**Q: When there are too many sub-partition values, errors related to `max_allowed_packet` or `innodb_log_file_size` may occur. What should I do?**
+
+Error One:
+
+Prompt: `The result packet of MySQL exceeds the limit. Please contact the admin to adjust the value of “max_allowed_packet“ as 256M in MySQL.`.
+
+Reason: The default value of metastore MySQL configuration `max_allowed_packet` is small, which will limit the data packet size when Kylin node communicates with MySQL. When the number of sub-partition values is too large, the packet size in actual communication will exceed this limit.
+For more information, please refer to Mysql [Official Document](https://dev.mysql.com/doc/refman/8.0/en/packet-too-large.html)
+
+Solution: You can adjust MySQL configuration as `max_allowed_packet=256M` to avoid this problem.
+
+Error Two:
+
+Prompt: The build fails. The kylin.log log prompts `The size of BLOB/TEXT data inserted in one transaction is greater than 10% of redo log size. Increase the redo log size using innodb_log_file_size`
+
+Reason: The amount of data written to mysql redo log in a single transaction exceeds 10% of innodb_log_file_size.
+
+Solution: Refer to Mysql [official document](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size) to increase the configuration item of `innodb_log_file_size`. You need to restart the mysqld service, please be cautious.
diff --git a/website/docs/modeling/model_design/aggregation_group.md b/website/docs/modeling/model_design/aggregation_group.md
new file mode 100644
index 0000000000..be78eefcb1
--- /dev/null
+++ b/website/docs/modeling/model_design/aggregation_group.md
@@ -0,0 +1,416 @@
+---
+title: Aggregate Index
+language: en
+sidebar_label: Aggregate Index
+pagination_label: Aggregate Index
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - aggregate index
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+Curse of dimensionality is an infamous problem for all OLAP engines based on precalculation. In Kylin, we designed the aggregation group mechanism to fix this issue.
+
+
+### <span id="background">Introduction</span>
+
+It is a known fact that Kylin speeds up query performance by pre-calculating aggregate index group (different combination of all dimensions, aka index). The problem is that indices grow exponentially with the dimension. For example, there are a total of 7 possible indices for an aggregate index group with 3 dimensions, however there are 15 possible cuboids for a aggregate index with 4 dimensions. Kylin is using scalable computation framework (Spark) and distributed storage (HDFS) to comp [...]
+
+
+![Aggregate Index Group](images/agg/AGG-1.png)
+
+
+
+To alleviate the pressure on index building, Kylin has released a series of advanced settings to help users filter on only those indices being used. These advanced settings include **Aggregate Group**, **Joint Dimension**, **Hierarchy Dimension**, and **Mandatory Dimension**.  
+
+
+
+### <span id="edit">Edit Aggregate Index</span>
+
+In the **Data Asset -> Model** page, click the model name to get more information, you can click **Index**, then click **Aggregate Group** button under **+ Index** in the **Index Overview** tab to enter the  aggregate index editing page. Or you can click **+** (Add Aggregate Group) button in the **Aggregate Group** tab to enter the page. Then you can edit the aggregate index in the pop-up window shown below, and define dimensions and measures in different aggregate groups according to yo [...]
+
+
+![Edit Aggregate Index](images/agg_1.png)
+
+**Step 1: Dimension Setting**
+
+The initial interface is to edit *Aggregation Group 1*. First you need to set the dimensions: select **Include**, click **Edit** and select from the list all the dimensions included in *Aggregation Group 1*. The items in the list are the dimensions you added in the model. Kylin will build different combinations of all the dimensions you choose; this is called building the aggregate index.
+
+Users can then set **Mandatory Dimension**, **Hierarchy Dimension**, and **Joint Dimension** in the *Aggregate Group 1*. Dimensions under these three settings have to be included in **Include** under this aggregate group first. You can add aggregate groups as needed. After editing all the aggregate groups, click the button beside **Index Amount** on the bottom left corner, estimated index number will be calculated and displayed beside the name of the aggregate group, the total estimated  [...]
+
+![Dimension Setting](images/agg_2.png)
+
+We recommend selecting frequently paired grouping dimensions and filtering dimensions into the aggregate group according to the cardinality from high to low. For example, you often query for the supplier ID `SUPPKEY` and the product ID `PARTKEY`, by adding the dimension `SUPPKEY` and dimension `PARTKEY` into the aggregate group, you can view the cardinality of these two relevant columns in **Data Asset-Data Source**. If the cardinality of `SUPPKEY` is greater than the cardinality of `PAR [...]
+
+**Step 1: Measure Setting**
+
+In the measure settings, all measures defined in the model are selected by default. You can define the measures corresponding to the selected dimensions in the aggregate group according to the business scenario. The aggregate index contains only the measures selected in the aggregate group. Whether or not you manually add a measure to the model, there is at least one measure `COUNT_ALL` in the aggregate group, which means ` COUNT(*) `.
+
+![Measure Setting](images/agg/agg_measure.png)
+
+Next, we will explain the mechanism of these advanced settings and provide use cases in the following sections. 
+
+
+
+### <span id="agg">Aggregate Group</span>
+
+You can divide combination of dimensions and measures they focus on in several groups, and these groups are called **Aggregate Group.**  
+
+As the aggregate index group shows at the beginning of this chapter, if you only need the dimension combinations `[Dimension AB - Measure M1]` and `[Dimension CD - Measure M2]`, then the aggregate index group can be divided into two aggregate groups, group AB-M1 and group CD-M2. As shown below, the number of indices can be reduced from 15 to 7.
+
+
+![Aggregate Group](images/agg/AGG-2.png)
+
+The aggregate groups that end users need might contain overlapping dimensions; for example, aggregate group ABC and aggregate group BCD both contain dimensions B and C. If these two aggregate groups contain the same measures, they will derive the same index: for example, aggregate group ABC will derive index BC and so will aggregate group BCD. An index will not be generated multiple times if it can be derived from more than one aggregate group, as shown below.
+
+![Derive same index](images/agg/AGG-3.png)
+
+
+
+According to your business scenario, you can decide whether to add an index that contains dimensions and measures defined in all aggregate groups. The index can answer queries across multiple aggregate groups, but this will impact query performance. In addition to this, there are some storage and building costs by adding this index. You can define it by clicking **+** (Add Setting Item) and setting the item `is-base-cuboid-always-valid` in **Setting -> Model Settings**.
+
+With aggregate groups, you can filter the granularity of index to get the dimensionality you want.                     
+
+
+
+#### Use Case of Aggregate Group
+
+Assume a transactional aggregate index, which contains these dimension: Customer ID `buyer_id`, Transaction Date `cal_dt`, Payment Type `pay_type`and Customer City `city`. Sometimes, analyst need to group dimension City, Cal_dt and Pay_Type to understand different payment type in different cities. There are other times analysts need to group dimension city, cal_dt andbuy_id together to understand customer behavior in different cities. As shown in the example above, we recommend building  [...]
+
+
+![](images/agg/AGG-4.png)
+
+
+
+Aggregate Group 1:  `[cal_dt,city, pay_type]`
+
+Aggregate Group 2: `[cal_dt,city, buyer_id]`
+
+Regardless of other situations, those two aggregate groups can eliminate 3 unnecessary indices: [pay_type, buyer_id], [city, pay_type, buyer_id] and [cal_dt, pay_type, buyer_id], so storage space and build time can be saved.
+
+
+Case 1: 
+```sql
+Select cal_dt, city, pay_type, count(*) from table
+Group by cal_dt, city, pay_type
+```
+will hit on index [cal_dt, city, pay_type]
+
+
+Case 2: 
+
+```sql
+Select cal_dt, city, buyer_id, count(*) from table
+Group by cal_dt, city, buyer_id 
+```
+will hit on index [cal_dt, city, buyer_id]
+
+Case 3: If one unusual query occur
+
+```sql
+Select pay_type, buyer_id,count(*) from table
+Group by pay_type, buyer_id
+```
+No index can be hit, so Kylin will calculate the result based on existing index on-demand. 
+
+
+
+### <span id="mandatory">Mandatory Dimension</span>
+
+Sometimes you might be interested in analysis with one or more specific dimensions, where any query will include one specific dimension. In this case, this dimension can be set as a **Mandatory Dimension**. Once set, only the indices with this dimension will be calculated. In the example at the beginning of this chapter, if dimension A is set as mandatory, then the indices will be calculated as shown below. The number of indices will be reduced from 15 to 8.
+![Reduce dimension combinations with Mandatory Dimension](images/agg/Mandatory-2.png)
+
+
+![Aggregate index group after reducing dimension combinations](images/agg/Mandatory-3.png)
+
+
+
+#### Use Case of Mandatory Dimension
+Assume a transactional aggregate index that includes transaction date, transaction location, product, payment type. Transaction date is a frequently used group by dimension. If transaction date is set as a mandatory dimension, the combination of dimensions will be calculated as shown below:
+
+![Use case of Mandatory Dimension](images/agg/Mandatory-4.png)
+
+
+
+### <span id="hierarchy">Hierarchy Dimension</span>
+
+End users will usually use dimensions with a hierarchical relationship, for example, country, province, and city. In this case, a hierarchical relationship can be set as a **Hierarchy Dimension**. From top to bottom, country, province and city are in a one-to-many relationship. These three dimensions can be grouped into three different combinations:
+
+1. group by country
+
+2. group by country, province(equivalent to group by province)
+
+3. group by country, province, city(equivalent to group by country, city or group by city)
+    ​
+    In the aggregate index group shown below, assume dimension A = Country, dimension B = Province and dimension C = City, then dimension ABC can be set as a hierarchy dimension. And index [A, C, D] = index [A, B, C, D],index [B, D] = index [A, B, D], thus, index [A, C, D] and index [B, D] can be pruned.
+
+    ![Hierarchy Dimension](images/agg/Hierarchy-2.png)
+
+    As the diagram below illustrates, based on the method above, Kylin can prune redundant index, hence reducing index from 15 to 7.
+
+    ![Reduce dimension combinations with Hierarchy Dimension](images/agg/Hierarchy-3.png)
+
+
+
+
+#### Use Case of Hierarchy Dimension
+Assume a transactional aggregate index that includes dimensions transaction city `city`, transaction province `province`, transaction country `country` and payment type `pay_type`. Analysts will group transaction country, transaction province, transaction city, and payment type together to understand customer payment type preference in different geographical locations. In the example above, we recommend creating hierarchy dimensions in existing aggregate group (Country / Province / City) [...]
+
+![Use case of Hierarchy Dimension](images/agg/Hierarchy-4.png)
+
+Aggregate Group: `[country, province, city,pay_type]`
+
+Hierarchy Dimension: `[country, province, city]`
+
+
+Case 1: 
+
+Analysts want to understand city level customer payment type preferences
+
+```sql
+SELECT city, pay_type, count() FROM table GROUP BY city, pay_type
+```
+
+can be retrieved from index [country, province, city, pay_type].
+
+
+
+Case 2: 
+
+Analysts want to understand province level customer payment type preferences
+
+```sql
+SELECT province, pay_type, count() FROM table GROUP BY province, pay_type
+```
+
+can be retrieved from index [country, province, pay_type].
+
+
+
+Case 3: 
+
+Analysts want to understand customer's payment type preferences from country level
+
+```sql
+SELECT country, pay_type, count() FROM table GROUP BY country, pay_type
+```
+
+can be retrieved from index [country, pay_type].
+
+
+
+Case 4: 
+
+Analysts want to reach a different granularity level using the geographical dimension:
+
+```sql
+SELECT country, city, count(*) FROM table GROUP BY country, city
+```
+
+
+will retrieve data from index [country, province, city].
+
+
+
+### <span id="joint">Joint Dimension</span>
+
+Sometimes you don’t need details of all possible dimension combinations. For example, you might query dimensions A, B, C together in most cases but not dimensions A, C or dimension C alone. To enhance performance in this case, **Joint Dimension** can be used. If A, B, and C are defined as a Joint Dimension, Kylin will only build index ABC but not indices AB, BC and A. Finally, indices will be built as below. The number of indices can then be reduced from 15 to 3.
+
+![Joint Dimension](images/agg/AGG-5.png)
+
+
+#### Use Case of Joint Dimension
+Assume a transactional aggregate index that includes dimension transaction date `cal_dt`, transaction city `city`, customer gender `sex_id`, payment type `pay_type`. Analysts usually need to group transaction date, transaction city, and customer gender to understand consumption preference for different genders in different cities, in this case, `cal_dt, city, sex_id `will be grouped together. In the case above, we recommend assigning them to joint dimensions based on existing aggregate g [...]
+
+![Use case of Joint Dimension](images/agg/AGG-6.png)
+
+Aggregate Group: `[cal_dt,city, sex_id,pay_type]`
+
+Joint Dimension:  `[cal_dt, city, sex_id]`
+
+
+
+Case 1: 
+
+```sql
+SELECT cal_dt,city, sex_id, count(*) FROM table GROUP BY cal_dt, city, sex_id
+```
+can retrieve data from index [cal_dt, city, sex_id].
+
+Case 2: 
+
+```sql
+SELECT cal_dt, city, count(*) FROM table GROUP BY cal_dt, city
+```
+then no index can be hit, and Kylin will calculate the result on demand based on existing indexes.
+
+**Dimensional cardinality product**
+
+Dimensional cardinality product: refers to the product of each dimension field cardinality in the joint dimension. The data of dimension cardinality comes from the sampling results of the source data table. The dimension cardinality product is used to represent the maximum number of permutations and combinations of dimensions in the joint index (i.e. the maximum number of pieces of this index).
+
+The dimension cardinality product does not participate in the index creation process, but is only used to assist in the creation of the union dimension of the aggregate group. In general, in order to ensure the query performance of the dimensions in the union dimension, the dimension cardinality product of a union dimension is not recommended to exceed 100,000, and in special cases (the dimensions in the union dimension must be queried together) can not pay attention to the value of the  [...]
+
+
+### <span id="mdc">Max Dimension Combination</span>
+
+The use of aggregate groups helps to avoid index number explosion. However, in order to achieve index optimization, modelers need to have a certain understanding of the data model, which is hard for the junior modelers.
+
+This chapter will introduce another simple index pruning tool named *Max Dimension Combination (MDC)*, which represents the maximum number of dimensions in every index. This tool limits the dimension number in a single index, which means indexes containing too many dimensions will not be built in index building process. This tool fits well in the situation where most queries only touch no more than N dimensions, where N is the MDC threshold that is configurable.
+
+> **Note**: MDC is only available from version 4.1.0.
+
+#### Dimensions Count in Query
+
+Next we introduce the method of counting the number of dimensions in a query. The number of dimensions in a query means the number of dimensions in the corresponding index of the query. For ordinary dimensions, one dimension is counted as 1. We treat a group of joint dimension or hierarchy dimension as one dimension when counting dimensions in a query, and ignore mandatory dimensions. For example,
+
+```sql
+select count(*) from table group by column_mandatory, column_joint1, column_joint2, column_hierarchy1, column_hierarchy2, column_normal
+```
+
+There is one mandatory dimension, two dimensions belonging to one joint dimension, two dimensions belonging to one hierarchy dimension and one normal dimension. So we treat them as 3 dimensions in index pruning.
+
+#### Schematic Diagram of Pruning
+
+![sprouting graph](images/mdc/index_mdc.png)
+
+This is an index sprouting graph which has 7 dimensions; some details are hidden in order to aid understanding.
+
+When MDC = 4, the indexes which have over 4 dimensions will be pruned, such as *ABCDEF*, *ABCDEG*, *ABCDE*, and *ABCDF*.
+
+When MDC = 3, the indexes which have over 3 dimensions will be pruned, such as *ABCDEF*, *ABCDEG*, *ABCD*, *ABCE*.
+
+Considering the performance in index building, if you choose to generate an index in the model settings that contains dimensions and measures defined in all aggregate groups, this index will not be pruned. At the same time, according to the calculation method of query dimensions mentioned before, we treat a group of joint dimension or hierarchy dimension as one dimension, and ignore mandatory dimensions. Therefore, when using the pruning tool, it is necessary to consider the actual numbe [...]
+
+#### Set Max Dimension Combination (MDC)
+
+We'll introduce how to set Max Dimension Combination (MDC) in this section. Click **+** (Add Aggregation Group), you can set **Aggregate index-level MDC** for all aggregate groups and **Aggregate group-level MDC** for a single aggregate group on the Edit Aggregation Index page, as shown in the figure below. Enter a positive integer in the input box and click OK to save the MDC setting.
+
+> **Note**: The setting of MDC will not take effect until you submit the Edit Aggregate Index page.
+
+![intro_mdc.en](images/mdc/intro_mdc.png)
+
+Aggregate index-level MDC affects all aggregate groups. Aggregate group-level MDC takes effect only for a single aggregate group. The priority of the aggregate group-level MDC is higher than that of the aggregate index-level MDC. The specific setting rules are as follows:
+
+1. If there is no MDC setting for a single aggregation group, the MDC of this aggregate group will be overridden by the aggregate index-level MDC. It can be seen that after the aggregate index-level MDC is set to 2, the number of indexes of the aggregate group decreases from 32 to 19, and the total number of indexes becomes 20. Except for an index that contains dimensions and measures defined in all aggregate groups, indexes that contain more than 2 dimensions are pruned.
+
+   ![Aggregate index-level MDC](images/mdc/total_mdc.png)
+
+2. If a single aggregate group has set its aggregate group-level MDC, the number of index dimensions for this aggregate group is limited only by its own MDC.
+
+   ![Aggregate group-level MDC](images/mdc/single_mdc.png)
+
+3. If the Aggregate index-level MDC is left blank, it means that you have turned off the setting of the MDC for all aggregate groups. There is no limit to the number of dimensions in the index generated by an aggregation group that has not individually set the aggregate group-level MDC at this time. Aggregate groups that have set the aggregate group-level MDC are not affected.
+
+#### Benefit and Trade-off
+
+On one hand, MDC dimension pruning tool reduces the index number and storage size significantly. On the other hand, some complex queries that cover more dimensions may hit large indexes, hence online calculation cannot be avoided, which may make query response slower. Like other index optimization tools, it's a kind of trade-off. If most queries touch fewer dimensions in your case, MDC deserves a shot.
+
+At the same time, according to laboratory test data, it is found that when the number of dimensions in the aggregate group is large, it may take several minutes to check index amount. During this period, there may be web UI lags, please be patient. The following are laboratory test data results for reference only. Please judge according to the actual scene:
+
+- 1 aggregate group with 1000 dimensions. When the aggregate index-level MDC is set to 1, the average time for checking index amount is 3.1 minutes.
+- 1 aggregate group with 1500 dimensions. When the aggregate index-level MDC is set to 1, the average time for checking index amount is 13.9 minutes.
+- 3 aggregate groups, each containing 500 dimensions. When the aggregate index-level MDC is set to 1, the average time for checking index amount is 3 minutes.
+
+### <span id="index">View Aggregate Index</span>
+
+Click **Data Asset -> Model** in the left navigation bar to enter the **Model List** page, then click the model name and  you can view the details of aggregate index in **Index-Index Overview**. 
+
+![Index](images/index_1.png)
+
+You can enter the index ID or index content in the **Search** box to filter the index. Index content includes the dimensions, measures, and columns contained in the index; the index content is matched by fuzzy filtering, while the index ID is matched exactly. You can click the button in the **Content** column to view the index details. What's more, you can edit and delete Custom (Table Index) indexes, while for other kinds of indexes you can only delete them.
+On the top of the list, you can see the data range. Once you have already built an index, it will show the time range of the data loaded in the index. If it is **full load**, the data range will show **full load**.
+
+If you need to delete some indexes, first you can select the indexes to be deleted, then click the **Delete** button above the list to delete.
+
+If you need to add base indexes when there is no base index or when a base index is missing, click **+ Index** and add the base indexes in the drop-down box.
+
+**Fields Explanation:**
+
+- Index ID: Index ID.
+
+- Storage: The storage size of the precomputed data in the index.
+
+- Usage: The frequency of queries having hit this index.
+
+- Source: **Recommended** indicates that the index is derived from the system recommendation, and **Custom** indicates that the index is defined by user.
+
+- Status: The status of the index, divided into four types:
+
+  - NO BUILD: Index not yet been built. You can click the **Build Index** button to build all NO BUILD indices;
+
+  - BUILDING: Index being built. You can see the corresponding building job being executed in the job monitor page;
+
+  - ONLINE: Index has been built and can serve queries;
+
+  - LOCKED: Due to the modification of the aggregate group, the index to be deleted may be in LOCKED status. It can still serve queries.
+
+    > Tip: When you modify an aggregate group and save it, some aggregate indices may be refreshed. For example, adding a measure to an aggregate group will refresh all the aggregate indices corresponding to the aggregate group. In Kylin, the corresponding behavior is to delete the original indices and add new indices. In order to ensure that your service is available, especially the query service is available, the original indices will be placed in LOCKED status before the new indices a [...]
+    >
+    > If modifying the aggregate groups will only cause some indices to be deleted, there will be no LOCKED indices. An index cannot be restored after it has been deleted.
+
+- Content: Index content.
+
+- Action: The actions that can be performed on the current index, such as **Build Index**.
+
+The details page shows the combination of dimensions and all measures in the index. For aggregate indexes, dimensions are listed first and measures after, with each dimension shown as `table.column`. For a table index, you will see all the columns in this index, their order, and the ShardBy columns.
+
+![Aggregate Detail](images/agg/agg_index_2.png)
+
+**Fields Explanation:**
+
+- Last Updated Time: The last time to update the index.
+- Content: Dimension name or measure name or columns.
+- Type: Dimension or Measure.
+- Cardinality: Cardinality of the column, which can be fetched by table sampling.
+- Shard by: ShardBy column of the index.
+
+### <span id="advanced">Advanced Settings</span>
+
+In the aggregate index of AI augmented mode, you can set the ShardBy column, and the data will be stored according to the column fragmentation to improve query efficiency. You can set the filter dimension or grouping dimension frequently used in the query as the ShardBy column according to the cardinality from high to low. In current version, you can only set one ShardBy column.
+
+> Note: The ShardBy column is applied to all custom aggregate indexes.
+
+In the navigation bar, on the **Data Asset -> Model** page, click the icon to the left of the specified model to expand the model for more information. You can see the **Advanced Setting** button in the **Index - Aggregate Group** section.
+
+In the **Advanced Setting**, you can select the dimensions that need to be set as the ShardBy column. 
+
+Updating the ShardBy column will invalidate all aggregate indices that contain ShardBy columns, and these indices must be rebuilt to take effect. Checking **Build index now** when saving will trigger a new index build job. If it is not checked, you can click the **Build** button later to trigger the build.
+
+![Advanced Settings](images/agg/advanced.png)
+
+#### Optimize Join with ShardBy Column
+Sometimes the subqueries of a join can match two indexes; Kylin will then get data from the two indexes and perform the join operation. When the cardinality of the subquery is high, the join operation may be time-consuming. Users can optimize the join operation in such cases by making the join key the ShardBy column.
+
+To enable this feature (it is enabled by default), users need to edit the `kylin.properties` file and add the following line:
+```
+kylin.storage.columnar.expose-sharding-trait=true
+```
+When this feature is enabled, Kylin will expose the sharding information to Spark. Joining two large datasets in Spark usually requires a data shuffle beforehand to make the data hash partitioned. As the ShardBy column already shards the data, the shuffle stage can be skipped during the join.
+
+##### Limitations
+1. As there can only be one ShardBy column for one model, the optimization applies to joins on the single ShardBy column only.
+
+##### Example
+```
+select org_id, cust_id, sum1, sum2
+from (
+	select org_id, cust_id, sum(....) sum1
+	from fact
+	where dt = ...
+	group by org_id, cust_id
+) T1
+inner join (
+	select cust_id, sum(...) sum2
+	from fact
+	where dt = ...
+	group by cust_id
+) T2 on T1.cust_id = T2.cust_id
+```
+The above SQL can be queried by joining two aggregate indexes. To optimize the join operation, user can set the join key - `cust_id` as the ShardBy column. In that case the shuffle stage can be skipped for the join operation.
+
diff --git a/website/docs/modeling/model_design/computed_column.md b/website/docs/modeling/model_design/computed_column.md
new file mode 100644
index 0000000000..9ef35e6c56
--- /dev/null
+++ b/website/docs/modeling/model_design/computed_column.md
@@ -0,0 +1,206 @@
+---
+title: Computed Column
+language: en
+sidebar_label: Computed Column
+pagination_label: Computed Column
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - computed column
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+**Computed Column** allows you to predefine actions such as data transformation and calculation in models. It enhances the data semantic abstraction according to various business scenarios. By replacing runtime calculation with offline index construction, the pre-calculation capability of Kylin is fully utilized. As a result, query performance will improve significantly. Hive UDFs are supported in computed columns, so existing business code can be reused.
+
+This section will describe the supported computed columns in Kylin, which includes the basic concepts, usage of computed columns and supported functions in computed columns.
+
+- [Basic Concepts](#concept)
+- [Create Computed Columns](#create)
+- [Edit Computed Columns](#edit)
+- [Use Computed Columns in Indices](#index)
+- [Query on Computed Column](#query)
+- [Nested Computed Columns](#nested)
+- [Advanced Functions](#function)
+- [Known Limitations](#limitation)
+
+### <span id="concept">Basic Concepts</span>
+
+- **Naming convention:** 
+  
+  Only letters, digits and underscores are supported. Pure digits or starting with underscores are NOT allowed.
+  
+- **Expression:** 
+  
+  Calculating logic. Expression of the computed column can contain columns across a fact table and a lookup table in current model.
+  
+- **Re-use and reference:**
+  
+  - Computed columns are separated by model and implicitly subjected to a fact table, thus, one expression cannot generate multiple computed columns on different models with different names.
+  - Strictly, computed columns cannot be reused across models in the same project. If you want to reuse a computed column in another model, you need to create a computed column with the same name and expression in the model.
+  - Nested computed columns are also supported, i.e. you can use an existing computed column to produce a new one.
+  
+- **Special notes:** 
+
+  One expression cannot generate multiple computed columns. Say we have two models `M1` and `M2` in the same project: 
+
+  - If both `M1` and `M2` share the same fact table `T1`, and the computed column `CC1 = T1.C1 + T1.C2` is defined on `M1`, you can create a computed column `CC1 = T1.C1 + T1.C2` rather than `CC2 = T1.C1 + T1.C2` on `M2`;
+  - If both `M1` and `M2` share the same fact table `T1`, and the computed column `CC1 = T1.C1 + T1.C2` is defined on `M1`, you can create a computed column `CC2 = T1.C1 * T1.C2` rather than `CC2 = T1.C1 + T1.C2` on `M2`;
+
+- **Others:**
+  - Computed columns can only be defined on the fact table by default. Only column expression, not column name, can be edited again after being created.
+  - Computed column can use either columns of fact table or dimension table. However, if columns in different table share the same name, please make sure that used columns are in the form of `table_alias.column_name`.
+  - Unexpected errors may occur when the computed column share the same name with other columns.
+  - Aggregate functions such as `SUM`, `MIN`, `MAX`, etc. are not allowed when defining the computed column expression.
+  - Expression only containing constants is not recommended to create computed column, such as  `POWER(CAST(2 AS DOUBLE), 2)`.
+  - If a function expression contains a column using the keywords as column name, it cannot be recommended as a computed column. For more details about keywords, please refer the **Keywords** section in  [SQL Specification](../../../query/insight/sql_spec.en.md). 
+  - Computed columns only support `Hive` data source in current version.
+  - JDBC-based syntax expressions only supported in smart mode project rather than in AI augmented mode project. For example, we cannot support `{FN CONVERT(PRICE, SQL_BIGINT)}` as a computed column expression in AI augmented mode project. 
+  - Computed columns cannot be used as time partition columns and join keys.
+  - If a function expression contains a table name or column name that is not started with letters or includes special characters, need to double quote this name. For example: `"100_KYLIN_SALES"."100_PRICE" * "100_KYLIN_SALES"."200_ITEM_COUNT"`
+
+### <span id="create">Create Computed Column</span>
+
+To help you master how to create a computed column, we demonstrate an example scenario below. Say you have a fact table named `P_LINEORDER` with following columns:
+
+- `LO_EXTENDEDPRICE`: transaction price
+- `LO_QUANTITY`: transaction quantity
+- `LO_ORDERDATE`: transaction date
+
+We want to define two computed columns on this fact table, `T_PRICE_PER_ITEM` to calculate the total transaction price for each item, and `YEAR_OF_ORDER` to indicate the year of the order. Concrete expressions are as follows:
+
+- `T_PRICE_PER_ITEM = P_LINEORDER.LO_EXTENDEDPRICE * P_LINEORDER.LO_QUANTITY` 
+- `YEAR_OF_ORDER = YEAR(P_LINEORDER.LO_ORDERDATE)`
+
+First, click the button **CC** marked in the picture. Then a window of **Computed Column** will pop up. 
+
+![Add a computed column](images/computed_column/cc_en_1.png)
+
+Secondly, click the button **+** in this window,  and a dialog box of **Add Computed Column** will pop up. Please fill in the following information:
+
+- `Column name`: Defines the name of the computed column.
+- `Expression`: Calculates the calculation logic for the column.
+
+![Define a computed column](images/computed_column/cc_en_2.png)
+
+Third, click the button **Submit**, the system will verify whether the name or the expression of the computed column is legal. If anything is wrong, the system will give you a tip, please correct it and resubmit. Once the computed column is created, you will see it on the fact table. As shown below, `T_PRICE_PER_ITEM` appears in the fact table `P_LINEORDER`.
+
+![Display a computed column](images/computed_column/cc_en_3.png)
+
+Finally, after creating the computed column, click the button **+** in the window of **Dimension** to add a new dimension which is based on the computed column `YEAR_OF_ORDER`, as shown below:
+
+![Add a dimension relies on computed column](images/computed_column/cc_en_4.png)
+
+Also, click the button **+** in the window of **Measure** to add a new measure `TOTAL_PRICE` which is based on computed column `T_PRICE_PER_ITEM`, as shown below:
+
+![Add a measure relies on computed column](images/computed_column/cc_en_5.png)
+
+
+### <span id="edit">Edit Computed Columns</span>
+
+In some cases, we need to change the expression to adapt some business scenario changes. At this point, we can modify the expressions of the computable columns directly by editing the model.
+
+![edit computed column](images/computed_column/cc_en_11.png)
+
+However, there are a few limitations and points of attention. Please read the following limitations carefully before using it:
+
+- **NOT** support modifying the name of computed column
+- **NOT** support modifying a computed column that is used in a nested computed column. If the computed column has been used by a nested computed column, modifying the expression will fail and the following message will be shown: `model [model_name] nested computed column [column_name] still contains computed column [column_name]`.
+- The changes might rebuild **related indexes** under model, which will pop up a message for users to confirm.
+- The changes might cause measures to be invalid, then delete the measures, related aggregation groups and layouts, which will also pop up a message for users to confirm.
+
+### <span id="index">Use Computed Columns in Indices</span>
+
+Now we have defined two computed columns in our model and produced a new dimension and a new measure. If we want to take advantage of the pre-calculation of indexes, we need to use them when creating indexes.
+
+You can use computed columns in an aggregate index or in a table index. Let's take an example. 
+
+Firstly, click the model name to get more information in the **Data Asset->Model** page. Then we need to click **Index** to enter the **Index Overview** page as shown below. Click **+ Index** to add index.
+
+![Use computed column](images/computed_column/cc_en_6.png)
+
+After the submission is successful, we have completed the basic usage about computed columns including:
+
+* Creates a computed column.
+* Creates dimensions and measures based on computed columns.
+* Defines computed columns in the index.
+
+### <span id="query">Query on Computed Column</span>
+
+A computed column is logically appended to the table's column list after creation. You can query the computed column as if it was a normal column as long as it is precalculated in an index.
+
+In order to improve query performance with computed columns, you need to define computed columns when creating an index.
+
+1. **Query Pushdown**
+
+   If a computed column is neither used as a dimension nor defined in indices, the query performance will not be improved. However, if **QUERY PUSHDOWN** is enabled, users can still use this computed column. Specifically, Kylin will analyze and translate such a query into a queryable SQL statement for the calculation engine.
+
+   Say there is a computed column named `T_PRICE_PER_ITEM` whose corresponding expression is `LO_EXTENDEDPRICE * LO_QUANTITY`; then if you query the SQL statement below:
+
+   ```sql
+   select sum(T_PRICE_PER_ITEM) from SSB.P_LINEORDER
+   ```
+
+   It will be translated to a new queryable SQL as below:
+
+   ```sql
+   select sum(LO_EXTENDEDPRICE * LO_QUANTITY) from SSB.P_LINEORDER
+   ```
+
+   Then, it will be pushed down to the calculation engine. 
+
+   > **Note** : if you wish to query the computed column, the complete join relations defined in the model that defines the computed column must be supplied in the SQL.
+
+2. **Explicit Query**
+
+   If the name of a computed column appears as a field or a parameter of functions in a SQL statement, we call it an **Explicit Query** on computed columns. For example:
+
+   ```sql
+   select sum(T_PRICE_PER_ITEM) from SSB.P_LINEORDER
+   ```
+
+3. **Implicit Query:** 
+
+   If the expression of a computed column appears as a field or a parameter of functions in a SQL statement, we call it an **Implicit Query** on computed columns. For example:
+
+   ```sql
+   select sum(LO_EXTENDEDPRICE * LO_QUANTITY) from SSB.P_LINEORDER
+   ```
+
+   In Kylin 4.x, expression of `LO_EXTENDEDPRICE * LO_QUANTITY` will be converted to `T_PRICE_PER_ITEM`. Then the original query will be translated to the query as shown below:
+
+   ```sql
+   select sum(T_PRICE_PER_ITEM) from SSB.P_LINEORDER
+   ```
+
+   If measure `sum(T_PRICE_PER_ITEM)` has been precalculated in an Aggregate Index, the query performance will be greatly improved.
+
+   
+
+
+### <span id="nested">Nested Computed Columns</span>
+
+Expression of computed column can be nested by other computed columns. You can define a new computed column based on an existing computed column; this is called **Nested Computed Column**. The expression specification for nested computed columns is the same as normal computed columns.
+
+Here, we will introduce how to create a nested computed column. For example, we want to create column `D_PRICE_PER_ITEM = 2 * T_PRICE_PER_ITEM`. `D_PRICE_PER_ITEM` is the name of this nested computed column and  `T_PRICE_PER_ITEM` is a predefined computed column. 
+
+First, define a computed column named `T_PRICE_PER_ITEM`, click **Submit**
+![Create a computed column](images/computed_column/cc_en_2.png)
+
+Second, define a computed column named `D_PRICE_PER_ITEM` and the expression is `2 * T_PRICE_PER_ITEM`.
+
+![Create a nested computed column](images/computed_column/cc_en_8.png)
+
+After clicking **Submit** button, it will automatically validate the computed column expression. Then you will see the computed column information shown in the figure above.
+
+When creating nested computed columns, please make sure that the referenced computed column actually exists and the name is correct. Otherwise you will receive a prompt message that `Computed Column ${Computed Column Name} use nonexistent column(s):${Column Name}`. You can re-submit after correction.
+
+
+
+### <span id="function">Advanced Functions</span>
+
+Computed column is pushed down to data source and Spark SQL is the query push-down engine in Kylin 4.x. Thus, the syntax of computed column expression should follow the grammar of Spark SQL.
diff --git a/website/docs/modeling/model_design/data_modeling.md b/website/docs/modeling/model_design/data_modeling.md
new file mode 100755
index 0000000000..9356225bdd
--- /dev/null
+++ b/website/docs/modeling/model_design/data_modeling.md
@@ -0,0 +1,66 @@
+---
+title: Model design overview
+language: en
+sidebar_label: Model design overview
+pagination_label: Model design overview
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - model design overview
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+Kylin model consists of multiple tables and their join relations. Kylin follows multidimensional modeling theory to build star or snowflake schemas based on tables. Kylin also leverages pre-computation technique and will reuse the pre-computed results to answer queries, so there's no need to traverse all data when there is a query, thus achieving sub-second query times on PB-level data.
+
+### Challenges when dealing with massive data
+
+When analyzing massive data, there are some techniques to speed up computing and storage, but they cannot change the time complexity of query, that is, query latency and data volume are linearly dependent. 
+
+If it takes 1 minute to query 100 million entries of data records, querying 10 billion data entries will take about 1 hour and 40 minutes. When companies want to analyze all business data piled up over the years or to add complexity to query, say, with more dimensions, queries will be running extremely slow or even time out. 
+
+### Accelerate query with Kylin pre-computation
+
+Kylin leverages pre-computation to avoid the computing pressure brought by the growing data volume. That is, Kylin will precompute the combinations of defined model dimensions and then store the aggregated results as indexes to shorten query latency. In addition, Kylin uses parallel computing and columnar storage techniques to improve computing and storage speed.  
+
+![](../images/reduceio.png)
+
+With pre-computation, the number of indexes will be determined by the dimension cardinality only, and will no longer undergo exponential growth as data volume increases. Taking the data analysis of online transactions as an example, with Kylin pre-computation, even if the volume of transaction data increases by 10 times, the query speed against the same analytical dimensions changes little. The computing time complexity can be kept at O(1), helping enterprises to analyze data more efficiently. 
+
+![](../images/responsetime_datavolume.png)
+
+### How to design model and indexes
+
+#### Manual modeling 
+
+In addition to intelligent modeling, Kylin also supports users to design their own models and indexes based on their business needs. Kylin provides step-by-step guidance on how to complete basic model settings, including dimensions, measures, join relationships, and indexes. For details, see [Manual modeling](../../model/manual_modeling.en.md). 
+
+#### Advanced model design
+
+Kylin offers various advanced features around models and indexes to help users quickly dig out the most valuable data. These features include: 
+
+- Accelerated model design: Kylin offers built-in [advanced measures](measure_design/intro.md) like count distinct and Top N to speed up modeling.  
+
+- Optimized index efficiency: Kylin uses the [data pruning](aggregation_group.md) technique to filter out less meaningful dimensions for index building efficiency. 
+
+For more information, see [Advanced model design](intro.md). 
+
+### Basic concepts 
+
+Kylin follows multidimensional modeling theory and decomposes complex concepts into specific functional modules to make modeling easier. Below are some of the basic concepts used in Kylin: 
+
+- Dimension: Dimension is a perspective of viewing data, which can be used to describe object attributes or characteristics, for example, product category.
+
+- Measure: Measure is an aggregated sum, which is usually a continuous value, for example, product sales. 
+
+- Model: Model consists of multiple tables and their join relations, as well as defined dimensions and measures.
+
+- Pre-computation: Pre-computation is the process of aggregating data based on model dimension combinations and of storing the results as indexes to accelerate data query.
+
+- Index: Index is used to accelerate data query. Indexes are divided into:
+  - Aggregate Index: Aggregate index is an aggregated combination of multiple dimensions and measures, and can be used to answer aggregate queries such as total sales for a given year.
+  - Table Index: Table index is a multi-level index in a wide table and can be used to answer detailed queries such as the last 100 transactions of a certain user. 
diff --git a/website/docs/modeling/model_design/images/agg/AGG-1.png b/website/docs/modeling/model_design/images/agg/AGG-1.png
new file mode 100755
index 0000000000..d24bce66fe
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/AGG-1.png differ
diff --git a/website/docs/modeling/model_design/images/agg/AGG-2.png b/website/docs/modeling/model_design/images/agg/AGG-2.png
new file mode 100644
index 0000000000..ed1ba4aeed
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/AGG-2.png differ
diff --git a/website/docs/modeling/model_design/images/agg/AGG-3.png b/website/docs/modeling/model_design/images/agg/AGG-3.png
new file mode 100644
index 0000000000..504049ad9c
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/AGG-3.png differ
diff --git a/website/docs/modeling/model_design/images/agg/AGG-4.png b/website/docs/modeling/model_design/images/agg/AGG-4.png
new file mode 100644
index 0000000000..9abea57e7c
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/AGG-4.png differ
diff --git a/website/docs/modeling/model_design/images/agg/AGG-5.png b/website/docs/modeling/model_design/images/agg/AGG-5.png
new file mode 100755
index 0000000000..1e9fccc3ee
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/AGG-5.png differ
diff --git a/website/docs/modeling/model_design/images/agg/AGG-6.png b/website/docs/modeling/model_design/images/agg/AGG-6.png
new file mode 100755
index 0000000000..2c20c7ec9f
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/AGG-6.png differ
diff --git a/website/docs/modeling/model_design/images/agg/Hierarchy-2.png b/website/docs/modeling/model_design/images/agg/Hierarchy-2.png
new file mode 100644
index 0000000000..b4bb2d573c
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/Hierarchy-2.png differ
diff --git a/website/docs/modeling/model_design/images/agg/Hierarchy-3.png b/website/docs/modeling/model_design/images/agg/Hierarchy-3.png
new file mode 100755
index 0000000000..fac1155e92
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/Hierarchy-3.png differ
diff --git a/website/docs/modeling/model_design/images/agg/Hierarchy-4.png b/website/docs/modeling/model_design/images/agg/Hierarchy-4.png
new file mode 100644
index 0000000000..ca8ff0f9ff
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/Hierarchy-4.png differ
diff --git a/website/docs/modeling/model_design/images/agg/Mandatory-2.png b/website/docs/modeling/model_design/images/agg/Mandatory-2.png
new file mode 100755
index 0000000000..ec5a9cd89a
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/Mandatory-2.png differ
diff --git a/website/docs/modeling/model_design/images/agg/Mandatory-3.png b/website/docs/modeling/model_design/images/agg/Mandatory-3.png
new file mode 100644
index 0000000000..f84e72c9f2
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/Mandatory-3.png differ
diff --git a/website/docs/modeling/model_design/images/agg/Mandatory-4.png b/website/docs/modeling/model_design/images/agg/Mandatory-4.png
new file mode 100755
index 0000000000..7476f15db5
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/Mandatory-4.png differ
diff --git a/website/docs/modeling/model_design/images/agg/advanced-EN-01.png b/website/docs/modeling/model_design/images/agg/advanced-EN-01.png
new file mode 100644
index 0000000000..9664e82894
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/advanced-EN-01.png differ
diff --git a/website/docs/modeling/model_design/images/agg/advanced-EN-02.png b/website/docs/modeling/model_design/images/agg/advanced-EN-02.png
new file mode 100644
index 0000000000..71a9cdf6e1
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/advanced-EN-02.png differ
diff --git a/website/docs/modeling/model_design/images/agg/advanced.png b/website/docs/modeling/model_design/images/agg/advanced.png
new file mode 100644
index 0000000000..db0d12b1b1
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/advanced.png differ
diff --git a/website/docs/modeling/model_design/images/agg/agg-group-1.png b/website/docs/modeling/model_design/images/agg/agg-group-1.png
new file mode 100644
index 0000000000..204e80c027
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/agg-group-1.png differ
diff --git a/website/docs/modeling/model_design/images/agg/agg-group-2.png b/website/docs/modeling/model_design/images/agg/agg-group-2.png
new file mode 100644
index 0000000000..fbf8dfc627
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/agg-group-2.png differ
diff --git a/website/docs/modeling/model_design/images/agg/agg-group-3.png b/website/docs/modeling/model_design/images/agg/agg-group-3.png
new file mode 100644
index 0000000000..84b0886ddd
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/agg-group-3.png differ
diff --git a/website/docs/modeling/model_design/images/agg/agg-group-4.png b/website/docs/modeling/model_design/images/agg/agg-group-4.png
new file mode 100644
index 0000000000..972bdbd443
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/agg-group-4.png differ
diff --git a/website/docs/modeling/model_design/images/agg/agg_detail.png b/website/docs/modeling/model_design/images/agg/agg_detail.png
new file mode 100644
index 0000000000..0037ff158e
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/agg_detail.png differ
diff --git a/website/docs/modeling/model_design/images/agg/agg_index_2.png b/website/docs/modeling/model_design/images/agg/agg_index_2.png
new file mode 100644
index 0000000000..c373629ade
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/agg_index_2.png differ
diff --git a/website/docs/modeling/model_design/images/agg/agg_measure.png b/website/docs/modeling/model_design/images/agg/agg_measure.png
new file mode 100644
index 0000000000..55d77592a2
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg/agg_measure.png differ
diff --git a/website/docs/modeling/model_design/images/agg_1.png b/website/docs/modeling/model_design/images/agg_1.png
new file mode 100644
index 0000000000..6aba059e6f
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg_1.png differ
diff --git a/website/docs/modeling/model_design/images/agg_2.png b/website/docs/modeling/model_design/images/agg_2.png
new file mode 100644
index 0000000000..45a897675d
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg_2.png differ
diff --git a/website/docs/modeling/model_design/images/agg_measure.png b/website/docs/modeling/model_design/images/agg_measure.png
new file mode 100644
index 0000000000..55d77592a2
Binary files /dev/null and b/website/docs/modeling/model_design/images/agg_measure.png differ
diff --git a/website/docs/modeling/model_design/images/auto_modeling/convert_no.en.png b/website/docs/modeling/model_design/images/auto_modeling/convert_no.en.png
new file mode 100644
index 0000000000..7981e6a728
Binary files /dev/null and b/website/docs/modeling/model_design/images/auto_modeling/convert_no.en.png differ
diff --git a/website/docs/modeling/model_design/images/auto_modeling/convert_or_not.en.png b/website/docs/modeling/model_design/images/auto_modeling/convert_or_not.en.png
new file mode 100644
index 0000000000..2ae045a66d
Binary files /dev/null and b/website/docs/modeling/model_design/images/auto_modeling/convert_or_not.en.png differ
diff --git a/website/docs/modeling/model_design/images/auto_modeling/convert_yes_model.en.png b/website/docs/modeling/model_design/images/auto_modeling/convert_yes_model.en.png
new file mode 100644
index 0000000000..4584b150f9
Binary files /dev/null and b/website/docs/modeling/model_design/images/auto_modeling/convert_yes_model.en.png differ
diff --git a/website/docs/modeling/model_design/images/auto_modeling/convert_yes_recommend_en.png b/website/docs/modeling/model_design/images/auto_modeling/convert_yes_recommend_en.png
new file mode 100644
index 0000000000..6ed4c8d4ae
Binary files /dev/null and b/website/docs/modeling/model_design/images/auto_modeling/convert_yes_recommend_en.png differ
diff --git a/website/docs/modeling/model_design/images/auto_modeling/import_sql.en.png b/website/docs/modeling/model_design/images/auto_modeling/import_sql.en.png
new file mode 100644
index 0000000000..9d4bc8e29b
Binary files /dev/null and b/website/docs/modeling/model_design/images/auto_modeling/import_sql.en.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_1.png b/website/docs/modeling/model_design/images/computed_column/cc_en_1.png
new file mode 100644
index 0000000000..0bc6b920dd
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_1.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_10.png b/website/docs/modeling/model_design/images/computed_column/cc_en_10.png
new file mode 100644
index 0000000000..3a68a601a4
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_10.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_11.png b/website/docs/modeling/model_design/images/computed_column/cc_en_11.png
new file mode 100644
index 0000000000..653d1e40f7
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_11.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_2.png b/website/docs/modeling/model_design/images/computed_column/cc_en_2.png
new file mode 100644
index 0000000000..6eef0e9f81
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_2.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_3.png b/website/docs/modeling/model_design/images/computed_column/cc_en_3.png
new file mode 100644
index 0000000000..a1ded30f20
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_3.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_4.png b/website/docs/modeling/model_design/images/computed_column/cc_en_4.png
new file mode 100644
index 0000000000..5a82aa283e
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_4.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_5.png b/website/docs/modeling/model_design/images/computed_column/cc_en_5.png
new file mode 100644
index 0000000000..124e5f5cb5
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_5.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_6.png b/website/docs/modeling/model_design/images/computed_column/cc_en_6.png
new file mode 100644
index 0000000000..296be288cb
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_6.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_7.png b/website/docs/modeling/model_design/images/computed_column/cc_en_7.png
new file mode 100644
index 0000000000..b9861a5468
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_7.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/cc_en_8.png b/website/docs/modeling/model_design/images/computed_column/cc_en_8.png
new file mode 100644
index 0000000000..dac6d7097c
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/cc_en_8.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/computed_column_en.7.png b/website/docs/modeling/model_design/images/computed_column/computed_column_en.7.png
new file mode 100644
index 0000000000..b0384eb0f8
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/computed_column_en.7.png differ
diff --git a/website/docs/modeling/model_design/images/computed_column/computed_column_en.8.png b/website/docs/modeling/model_design/images/computed_column/computed_column_en.8.png
new file mode 100644
index 0000000000..bf92f79bb0
Binary files /dev/null and b/website/docs/modeling/model_design/images/computed_column/computed_column_en.8.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures.png
new file mode 100644
index 0000000000..b583c4309c
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.1.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.1.png
new file mode 100644
index 0000000000..b8c0cfbcaf
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.1.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.2.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.2.png
new file mode 100644
index 0000000000..e190b7535e
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.2.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.3.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.3.png
new file mode 100644
index 0000000000..83a6431800
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.3.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.4.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.4.png
new file mode 100644
index 0000000000..7d306a51a7
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.4.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.5.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.5.png
new file mode 100644
index 0000000000..72a19cb9c4
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.5.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.7.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.7.png
new file mode 100644
index 0000000000..8acb551cf0
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.7.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.8.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.8.png
new file mode 100644
index 0000000000..f5e69a1e0f
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.8.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.9.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.9.png
new file mode 100644
index 0000000000..6899b18711
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.9.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.png
new file mode 100644
index 0000000000..6791944b4a
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_add_precisely.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add_precisely.png
new file mode 100644
index 0000000000..4fc3db6595
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_add_precisely.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_measures_edit.png b/website/docs/modeling/model_design/images/count_distinct/cd_measures_edit.png
new file mode 100644
index 0000000000..e190b7535e
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_measures_edit.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/cd_meausres_add.6.png b/website/docs/modeling/model_design/images/count_distinct/cd_meausres_add.6.png
new file mode 100644
index 0000000000..473fab2a1b
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/cd_meausres_add.6.png differ
diff --git a/website/docs/modeling/model_design/images/count_distinct/wd_datasample.png b/website/docs/modeling/model_design/images/count_distinct/wd_datasample.png
new file mode 100644
index 0000000000..19e45dc4b2
Binary files /dev/null and b/website/docs/modeling/model_design/images/count_distinct/wd_datasample.png differ
diff --git a/website/docs/modeling/model_design/images/import_sql.en.jpg b/website/docs/modeling/model_design/images/import_sql.en.jpg
new file mode 100644
index 0000000000..de7b5c69f9
Binary files /dev/null and b/website/docs/modeling/model_design/images/import_sql.en.jpg differ
diff --git a/website/docs/modeling/model_design/images/index_1.png b/website/docs/modeling/model_design/images/index_1.png
new file mode 100644
index 0000000000..8d65f60ee2
Binary files /dev/null and b/website/docs/modeling/model_design/images/index_1.png differ
diff --git a/website/docs/modeling/model_design/images/mdc/index_mdc.png b/website/docs/modeling/model_design/images/mdc/index_mdc.png
new file mode 100644
index 0000000000..2e087260a6
Binary files /dev/null and b/website/docs/modeling/model_design/images/mdc/index_mdc.png differ
diff --git a/website/docs/modeling/model_design/images/mdc/intro_mdc.png b/website/docs/modeling/model_design/images/mdc/intro_mdc.png
new file mode 100644
index 0000000000..d023b095a0
Binary files /dev/null and b/website/docs/modeling/model_design/images/mdc/intro_mdc.png differ
diff --git a/website/docs/modeling/model_design/images/mdc/single_mdc.png b/website/docs/modeling/model_design/images/mdc/single_mdc.png
new file mode 100644
index 0000000000..63b5d024dc
Binary files /dev/null and b/website/docs/modeling/model_design/images/mdc/single_mdc.png differ
diff --git a/website/docs/modeling/model_design/images/mdc/total_mdc.png b/website/docs/modeling/model_design/images/mdc/total_mdc.png
new file mode 100644
index 0000000000..f4502c7d1c
Binary files /dev/null and b/website/docs/modeling/model_design/images/mdc/total_mdc.png differ
diff --git a/website/docs/modeling/model_design/images/model_SCD1.png b/website/docs/modeling/model_design/images/model_SCD1.png
new file mode 100644
index 0000000000..934e538a5e
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_SCD1.png differ
diff --git a/website/docs/modeling/model_design/images/model_SCD2.png b/website/docs/modeling/model_design/images/model_SCD2.png
new file mode 100644
index 0000000000..57fa5daf02
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_SCD2.png differ
diff --git a/website/docs/modeling/model_design/images/model_check/24_model_diagnose_1.png b/website/docs/modeling/model_design/images/model_check/24_model_diagnose_1.png
new file mode 100644
index 0000000000..47d6afe09e
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_check/24_model_diagnose_1.png differ
diff --git a/website/docs/modeling/model_design/images/model_check/24_model_diagnose_2.png b/website/docs/modeling/model_design/images/model_check/24_model_diagnose_2.png
new file mode 100644
index 0000000000..584e579ecc
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_check/24_model_diagnose_2.png differ
diff --git a/website/docs/modeling/model_design/images/model_check/24_model_diagnose_4.png b/website/docs/modeling/model_design/images/model_check/24_model_diagnose_4.png
new file mode 100644
index 0000000000..46c6d36cef
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_check/24_model_diagnose_4.png differ
diff --git a/website/docs/modeling/model_design/images/model_check/24_model_diagnose_6.png b/website/docs/modeling/model_design/images/model_check/24_model_diagnose_6.png
new file mode 100644
index 0000000000..6e02793523
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_check/24_model_diagnose_6.png differ
diff --git a/website/docs/modeling/model_design/images/model_check/25_model_check.png b/website/docs/modeling/model_design/images/model_check/25_model_check.png
new file mode 100644
index 0000000000..86426f368d
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_check/25_model_check.png differ
diff --git a/website/docs/modeling/model_design/images/model_check/25_model_save.png b/website/docs/modeling/model_design/images/model_check/25_model_save.png
new file mode 100644
index 0000000000..b1cc0f8647
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_check/25_model_save.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/add_table.png b/website/docs/modeling/model_design/images/model_design/add_table.png
new file mode 100644
index 0000000000..69c4ebd63b
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/add_table.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/dimension.png b/website/docs/modeling/model_design/images/model_design/dimension.png
new file mode 100644
index 0000000000..bb16202ae3
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/dimension.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/dimension_2.png b/website/docs/modeling/model_design/images/model_design/dimension_2.png
new file mode 100644
index 0000000000..49fe9e8daa
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/dimension_2.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/join.png b/website/docs/modeling/model_design/images/model_design/join.png
new file mode 100644
index 0000000000..5c4fbfd9e6
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/join.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/measure.png b/website/docs/modeling/model_design/images/model_design/measure.png
new file mode 100644
index 0000000000..7a4110a936
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/measure.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/measure_2.png b/website/docs/modeling/model_design/images/model_design/measure_2.png
new file mode 100644
index 0000000000..eb35cfd448
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/measure_2.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/measure_3.png b/website/docs/modeling/model_design/images/model_design/measure_3.png
new file mode 100644
index 0000000000..925895a4c6
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/measure_3.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/model_edit_page.png b/website/docs/modeling/model_design/images/model_design/model_edit_page.png
new file mode 100644
index 0000000000..72a4b4520f
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/model_edit_page.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/model_filter_condition.en.png b/website/docs/modeling/model_design/images/model_design/model_filter_condition.en.png
new file mode 100644
index 0000000000..61a5d63688
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/model_filter_condition.en.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/model_result.png b/website/docs/modeling/model_design/images/model_design/model_result.png
new file mode 100644
index 0000000000..adf9efec54
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/model_result.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/partition_en.png b/website/docs/modeling/model_design/images/model_design/partition_en.png
new file mode 100644
index 0000000000..4f70f6a72b
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/partition_en.png differ
diff --git a/website/docs/modeling/model_design/images/model_design/sync_hive_comment_en.png b/website/docs/modeling/model_design/images/model_design/sync_hive_comment_en.png
new file mode 100644
index 0000000000..8e0ab1a8b4
Binary files /dev/null and b/website/docs/modeling/model_design/images/model_design/sync_hive_comment_en.png differ
diff --git a/website/docs/modeling/model_design/images/percentile_approximate/1.en.png b/website/docs/modeling/model_design/images/percentile_approximate/1.en.png
new file mode 100644
index 0000000000..7a7ee394a5
Binary files /dev/null and b/website/docs/modeling/model_design/images/percentile_approximate/1.en.png differ
diff --git a/website/docs/modeling/model_design/images/percentile_approximate/cube_query.en.png b/website/docs/modeling/model_design/images/percentile_approximate/cube_query.en.png
new file mode 100644
index 0000000000..7377b3114f
Binary files /dev/null and b/website/docs/modeling/model_design/images/percentile_approximate/cube_query.en.png differ
diff --git a/website/docs/modeling/model_design/images/percentile_approximate/return_type.en.png b/website/docs/modeling/model_design/images/percentile_approximate/return_type.en.png
new file mode 100644
index 0000000000..ef1567ebcd
Binary files /dev/null and b/website/docs/modeling/model_design/images/percentile_approximate/return_type.en.png differ
diff --git a/website/docs/modeling/model_design/images/review_model.png b/website/docs/modeling/model_design/images/review_model.png
new file mode 100644
index 0000000000..9ab12c2374
Binary files /dev/null and b/website/docs/modeling/model_design/images/review_model.png differ
diff --git a/website/docs/modeling/model_design/images/scd2/historical_dimension_table_switch.png b/website/docs/modeling/model_design/images/scd2/historical_dimension_table_switch.png
new file mode 100644
index 0000000000..7e6c7f2388
Binary files /dev/null and b/website/docs/modeling/model_design/images/scd2/historical_dimension_table_switch.png differ
diff --git a/website/docs/modeling/model_design/images/scd2/model_SCD2_5x.png b/website/docs/modeling/model_design/images/scd2/model_SCD2_5x.png
new file mode 100644
index 0000000000..84cc573f29
Binary files /dev/null and b/website/docs/modeling/model_design/images/scd2/model_SCD2_5x.png differ
diff --git a/website/docs/modeling/model_design/images/scd2/model_historical_dimension_table_scd2.png b/website/docs/modeling/model_design/images/scd2/model_historical_dimension_table_scd2.png
new file mode 100644
index 0000000000..ac109748f9
Binary files /dev/null and b/website/docs/modeling/model_design/images/scd2/model_historical_dimension_table_scd2.png differ
diff --git a/website/docs/modeling/model_design/images/scd2/model_historical_dimension_table_scd2_join.png b/website/docs/modeling/model_design/images/scd2/model_historical_dimension_table_scd2_join.png
new file mode 100644
index 0000000000..b5e0951755
Binary files /dev/null and b/website/docs/modeling/model_design/images/scd2/model_historical_dimension_table_scd2_join.png differ
diff --git a/website/docs/modeling/model_design/images/table_index/table_index_disable.png b/website/docs/modeling/model_design/images/table_index/table_index_disable.png
new file mode 100644
index 0000000000..867642b278
Binary files /dev/null and b/website/docs/modeling/model_design/images/table_index/table_index_disable.png differ
diff --git a/website/docs/modeling/model_design/images/table_index/table_index_enable.png b/website/docs/modeling/model_design/images/table_index/table_index_enable.png
new file mode 100644
index 0000000000..3157180d0d
Binary files /dev/null and b/website/docs/modeling/model_design/images/table_index/table_index_enable.png differ
diff --git a/website/docs/modeling/model_design/images/table_index/table_index_encode.png b/website/docs/modeling/model_design/images/table_index/table_index_encode.png
new file mode 100644
index 0000000000..4468b92cd8
Binary files /dev/null and b/website/docs/modeling/model_design/images/table_index/table_index_encode.png differ
diff --git a/website/docs/modeling/model_design/images/table_index/table_index_index.png b/website/docs/modeling/model_design/images/table_index/table_index_index.png
new file mode 100644
index 0000000000..76313978dd
Binary files /dev/null and b/website/docs/modeling/model_design/images/table_index/table_index_index.png differ
diff --git a/website/docs/modeling/model_design/images/table_index/table_index_sortby.png b/website/docs/modeling/model_design/images/table_index/table_index_sortby.png
new file mode 100644
index 0000000000..c7054dd917
Binary files /dev/null and b/website/docs/modeling/model_design/images/table_index/table_index_sortby.png differ
diff --git a/website/docs/modeling/model_design/images/table_index_1.png b/website/docs/modeling/model_design/images/table_index_1.png
new file mode 100644
index 0000000000..8e33989b40
Binary files /dev/null and b/website/docs/modeling/model_design/images/table_index_1.png differ
diff --git a/website/docs/modeling/model_design/images/table_index_2.png b/website/docs/modeling/model_design/images/table_index_2.png
new file mode 100644
index 0000000000..5c642138a4
Binary files /dev/null and b/website/docs/modeling/model_design/images/table_index_2.png differ
diff --git a/website/docs/modeling/model_design/images/table_index_3.png b/website/docs/modeling/model_design/images/table_index_3.png
new file mode 100644
index 0000000000..ceb17808c2
Binary files /dev/null and b/website/docs/modeling/model_design/images/table_index_3.png differ
diff --git a/website/docs/modeling/model_design/images/topN_1.png b/website/docs/modeling/model_design/images/topN_1.png
new file mode 100644
index 0000000000..b9da18aa7f
Binary files /dev/null and b/website/docs/modeling/model_design/images/topN_1.png differ
diff --git a/website/docs/modeling/model_design/images/topN_measure_edit.png b/website/docs/modeling/model_design/images/topN_measure_edit.png
new file mode 100644
index 0000000000..28db73fe69
Binary files /dev/null and b/website/docs/modeling/model_design/images/topN_measure_edit.png differ
diff --git a/website/docs/modeling/model_design/intro.md b/website/docs/modeling/model_design/intro.md
new file mode 100644
index 0000000000..ca8e595b7c
--- /dev/null
+++ b/website/docs/modeling/model_design/intro.md
@@ -0,0 +1,17 @@
+---
+title: Advanced model design
+language: en
+sidebar_label: Advanced model design
+pagination_label: Advanced model design
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - advanced model design
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+In this chapter, we take the Kylin built-in dataset as an example, to show you how to design a data model.
diff --git a/website/docs/modeling/model_design/measure_design/collect_set.md b/website/docs/modeling/model_design/measure_design/collect_set.md
new file mode 100644
index 0000000000..f2568efb58
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/collect_set.md
@@ -0,0 +1,69 @@
+---
+title: COLLECT_SET
+language: en
+sidebar_label: COLLECT_SET
+pagination_label: COLLECT_SET
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - collect_set
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+From v5, Kylin supports the COLLECT_SET function, which returns a set of unique elements as an array. The syntax is `COLLECT_SET(column)`. In smart mode, the product recommends the index containing the COLLECT_SET measure based on your query. In AI augmented mode, the COLLECT_SET measure is customizable.
+
+
+
+### Use Case
+
+Let’s use the project created in the chapter [Tutorial](../../../quickstart/expert_mode_tutorial.md) as an example to introduce COLLECT_SET measure settings. This project uses the SSB Dataset and needs to complete the model design and index build (including data load). A model won't be able to serve any queries if it has no index and data. You can read [Model Design Basics](../data_modeling.md) to understand more about the methods used in model design. 
+
+We will use the fact table `SSB.P_LINEORDER`. This sample table is a mockup of transactions that can happen in an online marketplace. It has a couple of dimension and measure columns. For easy understanding, we will only use two columns: `LO_CUSTKEY` and `LO_ORDERDATE`. The table below gives an introduction of these columns.
+
+| Column       | Description |
+| :----------- | :---------- |
+| LO_CUSTKEY   | Customer ID |
+| LO_ORDERDATE | Order Date |
+
+We want to query the order date combination of each customer. The order date will be returned as an array with deduplicated values. The query example is below:
+
+```sql
+SELECT LO_CUSTKEY, COLLECT_SET(LO_ORDERDATE)
+FROM SSB.P_LINEORDER 
+GROUP BY LO_CUSTKEY
+```
+
+Before the **COLLECT_SET** measure is added, the query will pushdown to Hive. According to the data amount of source tables, the result may return in several minutes or more.
+
+
+
+### How to Use
+
+Please add a measure in the model editing page as follows. Please fill in the measure **Name**, for example `COLLECT_SET_ORDERDATE`, select **Function** as **COLLECT_SET**. Finally select the target column from the dropdown list, for example `P_LINEORDER.LO_ORDERDATE`.
+
+![Add Collect_Set Measure](images/add_collect_set.png)
+
+Once the measure is added and the model is saved, click **Add Index** in the pop-up window to enter the **Model Index** page. You need to click **+**(Add Aggregate Group) under the **Aggregate Group** tab, add the corresponding dimensions and measures to the appropriate aggregate group according to your business scenario, and the new aggregate index will be generated after submission. In this example, the new index will contain the dimension `LO_CUSTKEY` and the measure ` COLLECT_SET(LO_ [...]
+
+Resubmit the above SQL query in the **Query -> Insight** page, and you will find the result as below:
+
+![Query Result](images/collect_result.png)
+
+If you need to create a model from the very beginning and add a COLLECT_SET measure, please add some indexes and load data into the model. A model won't be able to serve any query if it has no index and data. You can read this chapter [Model Design Basics](../data_modeling.md) to understand the method of model design.
+
+In actual application scenarios, you can use the COLLECT_SET function in combination with other functions to apply more analysis scenarios. For example, the following query combines the CONCAT_WS function, which concatenates the values in the array of order dates into a string and splits it with `;`:
+
+```sql
+SELECT LO_CUSTKEY, CONCAT_WS(';', COLLECT_SET(LO_ORDERDATE))
+FROM SSB.P_LINEORDER 
+GROUP BY LO_CUSTKEY
+```
+
+![Query Result](images/concatws_result.png)
+
+> **Note**: The CONCAT_WS function is only supported in conjunction with the COLLECT_SET function when querying.
diff --git a/website/docs/modeling/model_design/measure_design/corr.md b/website/docs/modeling/model_design/measure_design/corr.md
new file mode 100644
index 0000000000..b695b92ebd
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/corr.md
@@ -0,0 +1,53 @@
+---
+title: CORR 
+language: en
+sidebar_label: CORR 
+pagination_label: CORR 
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - corr 
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+Correlation coefficient is used in statistics to measure how strong a relationship is between two variables. The Correlation coefficient uses Pearson correlation underneath.
+
+
+
+### CORR Introduction
+
+CORR function will return a value to find the relationship between the two variables. The value is between -1 and 1, where 1 indicates a strong positive relationship. For example, for every positive increase in one variable, there will be a positive increase in another variable. 0 means that there is no relationship between the two variables and -1 means that when one variable has a positive increase, another one will have a negative decrease. Its syntax is as below:
+
+> corr({col1},{col2}), where col1 and col2 are the columns used to calculate the correlation. It should be noted that, in the current version, the parameter return type for function CORR must be one of *real*, *bigint*, *integer*, *int4*, *long8*, *tinyint*, *smallint*, *decimal*, *double*, *float* and *numeric*. Date columns are not supported in the calculation for now.
+>
+
+The query example in Kylin is as below:
+
+```sql
+SELECT corr(ITEM_COUNT, PRICE)
+FROM TEST_KYLIN_FACT
+```
+
+
+### How to Use 
+
+Step 1, in the model edit view, click *Add* in the *Measure* sheet
+
+![Adding Measure](images/corr_add_measure.png)
+
+Step 2, select the *CORR* function and corresponding columns
+
+![Choosing Expression](images/corr_edit_measure.png)
+
+Step 3, after saving the model and building the indexes, users can do queries in the *Insight* page
+
+![SQL Query](images/corr_query.png)
+
+### Notes
+1. The *CORR* measure may need to create a bunch of internal measures (which are hidden from the end user) and computed columns on the model. Kylin will create and manage those internal measures itself underneath. If you wish to delete the auto-generated computed columns, you should remove the corresponding CORR measure first.
+2. The correlation result might be inaccurate if there are null values in the input columns. Users are suggested to put *Data Filter Conditions* when saving models to filter out the null values if there is a *CORR* measure.
+
diff --git a/website/docs/modeling/model_design/measure_design/count_distinct_bitmap.md b/website/docs/modeling/model_design/measure_design/count_distinct_bitmap.md
new file mode 100644
index 0000000000..874ed7df05
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/count_distinct_bitmap.md
@@ -0,0 +1,46 @@
+---
+title: Count Distinct (Precise)
+language: en
+sidebar_label: Count Distinct (Precise)
+pagination_label: Count Distinct (Precise)
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - count distinct (precise)
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+Count Distinct is a frequently used function for many data analysts. We implement precise Count Distinct based on bitmap. For the data with type tiny int(byte), small int(short) and int, the value projects onto the bitmap directly. For the data with type long, string, and others, encode the value as String onto a dict and project the dict id onto the bitmap. The resulting measure is the serialized data of the bitmap, not just the count value. This ensures results are always correct withi [...]
+
+In the project of Kylin 5 the Count Distinct (Precise) measure is customizable.
+
+
+
+### Prerequisite
+
+Before using the Count Distinct query, you need to clarify if the target column is ready. Click **Data Asset->Model**, select a model and click **Edit** to enter the model edit page. Then click **M** in the top right page to extend **Measure** table. You can get measure information in this table.  If the measure desired has been pre-calculated on precise Count Distinct syntax (here requires both `Function` to be count_distinct and `Return Type` to be bitmap) then this measure is ready fo [...]
+
+
+
+### Count Distinct Precision Setting 
+
+Let’s use the project created in the chapter [Tutorial](../../../quickstart/expert_mode_tutorial.md) as an example to introduce count distinct precision measure settings. This project uses the SSB Dataset and needs to complete the model design and index build (including data load). A model won't be able to serve any queries if it has no index and data. You can read [Model Design Basics](../data_modeling.md) to understand more about the methods used in model design. 
+
+Please add a measure in the model editing page as follows. Please fill in the measure **Name**, such as `DISTINCT_CUSTOMER`, select **Function** as **COUNT_DISTINCT**, select accuracy requirement from **Function Parameter**, and finally select the target column from the drop-down list.
+
+Kylin offers both an approximate Count Distinct function and a precise Count Distinct function. To get the pre-calculated precise Count Distinct value, select `Function Parameter: Precisely`, which is based on the bitmap; it will return an error-free result if the storage resource is sufficient. For instance, when the Count Distinct value is over millions, a single result size might be hundreds of megabytes. 
+
+> **Note:** The query of precise Count Distinct is based on bitmap, so it will consume more resources. 
+
+![Add precisely COUNT_DISTINCT measure](images/cd_measures_add_precisely.png)
+
+Once the measure is added and the model is saved, you need to go to the **Edit Aggregate Index** page, add the corresponding dimensions and measures to the appropriate aggregate group according to your business scenario, and the new aggregate index will be generated after submission. You need to build index and load data to complete the precomputation of the target column. You can check the job of Build Index in the Job Monitor page. After the index is built, you can use the **Count Dist [...]
+
+If you need to create a model from the very beginning and add a Count Distinct (Precise) measure, please add some indices and load data into the model. A model won't be able to serve any query if it has no index and data. You can read this chapter [Model Design Basics](../data_modeling.md) to understand the methods used in model design.
+
+For more information about approximate Count Distinct function, please refer to [Count Distinct (Approximate)](count_distinct_hllc.md) Introduction.
diff --git a/website/docs/modeling/model_design/measure_design/count_distinct_case_when_expr.md b/website/docs/modeling/model_design/measure_design/count_distinct_case_when_expr.md
new file mode 100644
index 0000000000..ff8eaf122d
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/count_distinct_case_when_expr.md
@@ -0,0 +1,73 @@
+---
+title: Count Distinct Case When Expression
+language: en
+sidebar_label: Count Distinct Case When Expression
+pagination_label: Count Distinct Case When Expression
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - count distinct case when expression
+    - case when
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+
+In some data analysis scenarios, you may encounter the SQL usage of Count Distinct Case When Expression.
+
+In previous versions, if you wanted to speed up such queries through model pre-calculation, you needed to set the Case When Expression as a computed column, and then set a Count Distinct measure on that computed column to answer such queries.
+
+Starting from Kylin V5, we have provided special optimizations for this type of query, allowing users to only set the Count (Distinct Column) measure,
+the system uses the pre-calculated results and adds some Case When Expression online calculations to fully answer the query, reducing the complexity of model settings and improving user experience.
+
+### How to Use
+
+1. Enable optimization
+
+This function is disabled by default, and it can be enabled at the system or project level.
+
+To enable it on the system level, configure the parameters in `$KYLIN_HOME/conf/kylin.properties` . To enable it on project level, add the configuration in **Setting-Advanced Settings-Custom Project Configuration**.
+
+```
+kylin.query.convert-count-distinct-expression-enabled=true
+```
+
+2. Supported Count Distinct Case When Expression syntax
+
+```
+count(distinct case when {condition} then {column} else null end)
+```
+
+Notice:
+
+a. {condition} is a dimension column expression, for example `cal_dt = '2012-01-01'`.
+
+b. The {column} must be set to the `count (distinct column)` measure.
+
+c. When selecting the error option in **Function Parameter**, the return type `Precisely` must be selected; otherwise the optimization for this syntax cannot be triggered.
+
+![Add precisely COUNT_DISTINCT measure](images/cd_measures_add_precisely.png)
+
+After the function is enabled, queries that conform to the above grammar can be answered by indexes that include the **dimension column** in the `condition` expression and the `count(distinct column)` **measure**.
+
+Example:
+
+```
+count(distinct (case when cal_dt = date'2012-01-01' then price else null end))
+```
+
+It can be answered by indexes including `cal_dt` dimension and `count(distinct price)` measure.
+
+
+### Known Limitation
+
+1. The `else` branch can only be `null`; constants are not supported for now, such as `case when ... then column1 else 1 end`.
+In recent versions, the `else` branch can also be `cast(null as type)`, such as `case when ... then column1 else cast(null as double) end`.
+Note that `type` should be the same as, or in the same category as, the type of `column1`;
+otherwise the SQL may be invalid and an error will be reported, or this optimization cannot be applied.
+A category refers to a group of similar types, e.g. numeric types, date types, Boolean types, etc.
+
+2. Only one pair of `when .. then ..` is supported after case, and multiple pairs are not supported for now, such as `case when .. then column1 when ... then column2 else null end`.
diff --git a/website/docs/modeling/model_design/measure_design/count_distinct_hllc.md b/website/docs/modeling/model_design/measure_design/count_distinct_hllc.md
new file mode 100644
index 0000000000..8e8e90d54f
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/count_distinct_hllc.md
@@ -0,0 +1,51 @@
+---
+title: Count Distinct (Approximate)
+language: en
+sidebar_label: Count Distinct (Approximate)
+pagination_label: Count Distinct (Approximate)
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - Count Distinct (Approximate)
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+Count Distinct is a frequently used function for many data analysts, which is used to calculate the number of unique elements in a (multi)set. However, calculating the exact Count Distinct value consumes a lot of resources in big data scenarios. Since v2.1, this product implements approximate Count Distinct using the [HyperLogLog](https://hal.inria.fr/hal-00406166/document) algorithm, offering several precisions with error rates from 9.75% to 1.22% to support different query needs.
+
+> **Note:** If you don't require a particularly precise result, this approximate Count Distinct query will return a good approximation with limited storage resources.
+
+In a Kylin 5 project, you can customize a Count Distinct (Approximate) measure with 5 accuracy options:
+
+- Error Rate < 9.75%
+- Error Rate < 4.88%
+- Error Rate < 2.44%
+- Error Rate < 1.72%
+- Error Rate < 1.22%
+
+
+### Prerequisite
+
+Let’s use the project created in the chapter [Tutorial](../../../quickstart/expert_mode_tutorial.md) as an example to introduce approximate count distinct measure settings. This project uses the SSB Dataset and needs to complete the model design and index build (including data load). A model won't be able to serve any queries if it has no index and data. You can read [Model Design Basics](../data_modeling.md) to understand more about the methods used in model design. 
+
+Before running a Count Distinct query, you need to check that the target column is ready. You can get measure information on the model editing page. If the desired measure has been pre-calculated with the approximate Count Distinct syntax (requires both `Function` to be count_distinct and `Return Type` to be hllc), then this measure is ready for Count Distinct querying. Otherwise, you need to add a new Count Distinct (Approximate) measure first.
+
+### Measure Edit
+
+Please add a measure in the model editing page as follows. Please fill in the measure **Name**, such as `DISTINCT_SHIPPRIOTITY`, select **Function** as **COUNT_DISTINCT**, select accuracy requirement from **Function Parameter**, and finally select the target column from the drop-down list.
+
+![Add approximate COUNT_DISTINCT measure](images/cd_measures_edit.png)
+
+Once the measure is added and the model is saved, click **Add Index** in the pop-up window to enter the **Model Index** page. You need to click **+**(Add Aggregate Group) under the **Aggregate Group** tab, add the corresponding dimensions and measures to the appropriate aggregate group according to your business scenario, and the new aggregate index will be generated after submission. You need to build index and load data to complete the precomputation of the target column. You can check [...]
+
+```sql
+SELECT COUNT(DISTINCT P_LINEORDER.LO_SHIPPRIOTITY)
+FROM SSB.P_LINEORDER
+```
+If you need to create a model from the very beginning and add a Count Distinct (Approximate) measure, please add some indices and load data into the model. A model won't be able to serve any query if it has no index and data. You can read this chapter [Model Design Basics](../data_modeling.md) to understand the method of model design.
+
+For more information about the precise Count Distinct function, please refer to the [Count Distinct (Precise)](count_distinct_bitmap.md) introduction.
diff --git a/website/docs/modeling/model_design/measure_design/images/add_collect_set.png b/website/docs/modeling/model_design/measure_design/images/add_collect_set.png
new file mode 100644
index 0000000000..056e8158cf
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/add_collect_set.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/cd_measures_add_precisely.png b/website/docs/modeling/model_design/measure_design/images/cd_measures_add_precisely.png
new file mode 100644
index 0000000000..a9591b367c
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/cd_measures_add_precisely.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/cd_measures_edit.png b/website/docs/modeling/model_design/measure_design/images/cd_measures_edit.png
new file mode 100644
index 0000000000..45930e82b7
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/cd_measures_edit.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/collect_result.png b/website/docs/modeling/model_design/measure_design/images/collect_result.png
new file mode 100644
index 0000000000..7ed5c57d0a
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/collect_result.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/concatws_result.png b/website/docs/modeling/model_design/measure_design/images/concatws_result.png
new file mode 100644
index 0000000000..376afafa5a
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/concatws_result.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/corr_add_measure.png b/website/docs/modeling/model_design/measure_design/images/corr_add_measure.png
new file mode 100644
index 0000000000..3335fb9caf
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/corr_add_measure.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/corr_edit_measure.png b/website/docs/modeling/model_design/measure_design/images/corr_edit_measure.png
new file mode 100644
index 0000000000..3ea8b33d20
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/corr_edit_measure.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/corr_query.png b/website/docs/modeling/model_design/measure_design/images/corr_query.png
new file mode 100644
index 0000000000..16805b2384
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/corr_query.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/percentile_approximate.png b/website/docs/modeling/model_design/measure_design/images/percentile_approximate.png
new file mode 100644
index 0000000000..5089e8ec31
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/percentile_approximate.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/percentile_result.png b/website/docs/modeling/model_design/measure_design/images/percentile_result.png
new file mode 100644
index 0000000000..aa4e1bfd35
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/percentile_result.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/percentile_result_hive.png b/website/docs/modeling/model_design/measure_design/images/percentile_result_hive.png
new file mode 100644
index 0000000000..27edf8c495
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/percentile_result_hive.png differ
diff --git a/website/docs/modeling/model_design/measure_design/images/topN_en_measure_edit.jpg b/website/docs/modeling/model_design/measure_design/images/topN_en_measure_edit.jpg
new file mode 100644
index 0000000000..96fe2758fb
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/topN_en_measure_edit.jpg differ
diff --git a/website/docs/modeling/model_design/measure_design/images/topn_result.png b/website/docs/modeling/model_design/measure_design/images/topn_result.png
new file mode 100644
index 0000000000..a7fde36147
Binary files /dev/null and b/website/docs/modeling/model_design/measure_design/images/topn_result.png differ
diff --git a/website/docs/modeling/model_design/measure_design/intro.md b/website/docs/modeling/model_design/measure_design/intro.md
new file mode 100644
index 0000000000..08b68c5267
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/intro.md
@@ -0,0 +1,37 @@
+---
+title: Measures
+language: en
+sidebar_label: Measures
+pagination_label: Measures
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - measures
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+This section introduces how to design measures in project.
+
+Kylin provides basic measures such as SUM, MAX, MIN, COUNT and also advanced measures including TopN, precise count distinct, approximate count distinct, and approximate Percentile.
+
+In the model editing page, there are three ways to add measures:
+
+> **Note**: It's highly recommended to finish the basic model design before adding measures. You can click the **M** button on the right side of the model editing page to pop up the measure list.
+
+
+
+- Drag&Drop: drag the column that you want to define as a measure from the model to the measure list area, and then edit the measure in the pop-up window.
+
+- Add Measure: click the first button **+ (Add)** on the measure list, and then edit the measure in the pop-up window.
+
+- Add Measure in Batch: click the button **+ (Batch Add)** in the middle of the measure list, and then add multiple measures in the pop-up window.
+
+  > **Note**: The Batch Add only includes SUM, MAX, MIN, and COUNT. If you need to add advanced measure, please choose the first two ways.
+
+### Known Limitation
+
+When the computed column type of the SUM measure is `decimal(P,D)`, the precision is `P + 10`, the maximum precision is 38, and custom precision is not supported.
diff --git a/website/docs/modeling/model_design/measure_design/percentile_approx.md b/website/docs/modeling/model_design/measure_design/percentile_approx.md
new file mode 100644
index 0000000000..666e411e9d
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/percentile_approx.md
@@ -0,0 +1,68 @@
+---
+title: Percentile (Approximate)
+language: en
+sidebar_label: Percentile (Approximate)
+pagination_label: Percentile (Approximate)
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - percentile (approximate)
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+This product supports the function **percentile**. Some previous versions refer to it as **percentile_approx**. They have the same algorithm (thus result). If the percentile measure is predefined, the SQL query will enable sub-second query latency. 
+
+This function has three return types: 'percentile(100)', 'percentile(1000)' and 'percentile(10000)'. A higher return value means higher accuracy, but more storage resources are occupied. We recommend using percentile(100) in most scenarios.
+
+In the project of Kylin 5 the Percentile (Approximate) measure is customizable.
+
+
+### Percentile_Approx Introduction
+
+Percentile_approx returns the value below which a given percentage of observations in a group of observations falls. For example, the 20th percentile is the value below which 20% of the observations may be found. Its syntax is as below:
+
+- percentile_approx({measure},p,B)
+
+*measure* is the measure to query. *p* is a number between 0 and 1, inclusive. *B* controls the approximation accuracy: the higher the value, the higher the accuracy of the result. percentile_approx uses the interpolation method to determine the value of the nth percentile. 
+
+
+
+### Use Case
+
+Let’s use the project created in the chapter [Tutorial](../../../quickstart/expert_mode_tutorial.md) as an example to introduce percentile_approx measure settings. This project uses the SSB Dataset and needs to complete the model design and index build (including data load). A model won't be able to serve any queries if it has no index and data. You can read [Model Design Basics](../data_modeling.md) to understand more about the methods used in model design. 
+
+We will use the fact table `SSB.P_LINEORDER`. This sample table is a mockup of transactions that can happen in an online marketplace. It has a couple of dimension and measure columns. For easy understanding, we will only use two columns:  `LO_SUPPKEY` and `LO_ORDTOTALPRICE`. The table below gives an introduction to these columns.
+
+| Column           | Description |
+| ---------------- | ----------- |
+| LO_SUPPKEY       | Supplier ID |
+| LO_ORDTOTALPRICE | Sold amount |
+
+We want to query the 50th percentile of each supplier's sold amount. The query example is below:
+
+```sql
+SELECT LO_SUPPKEY, percentile_approx(LO_ORDTOTALPRICE, 0.5) AS ORDER_TOTAL_PRICE
+FROM SSB.P_LINEORDER
+GROUP BY LO_SUPPKEY
+```
+
+Before the **PERCENTILE_APPROX** measure is added, the system will pushdown the query to Hive if the query pushdown function is enabled.
+![Percentile Query Result](images/percentile_result_hive.png)
+
+
+
+### How to Use
+
+Please add a measure in the model editing page as follows. Please fill in the measure **Name** for example `PERCENTILE_ORDTOTALPRICE`, select **Function** as **PERCENTILE_APPROX**, select **Function Parameter** as 'percentile(100)', 'percentile(1000)' or 'percentile(10000)' on demand. The Function Parameter means B listed in the above syntax. Higher value means higher accuracy and higher storage resources are occupied. Finally select the target column from the dropdown list.
+
+![Add Percentile Measure](images/percentile_approximate.png)
+
+Once the measure is added and the model is saved, click **Add Index** in the pop-up window to enter the **Model Index** page. You need to click **+**(Add Aggregate Group) under the **Aggregate Group** tab, add the dimensions and measures to the aggregate groups according to your business scenario, and the new aggregate indices will be generated after submission. In this example, the new index will contain the dimension `LO_SUPPKEY` and the measure ` percentile_approx(LO_ORDTOTALPRICE, p, [...]
+
+Resubmit the above SQL query in the **Query -> Insight** page, and you will find that the result returns the 50th percentile of each supplier's sold amount.
+
+![Percentile Query Result](images/percentile_result.png)
diff --git a/website/docs/modeling/model_design/measure_design/sum_expression.md b/website/docs/modeling/model_design/measure_design/sum_expression.md
new file mode 100644
index 0000000000..f7138517cf
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/sum_expression.md
@@ -0,0 +1,106 @@
+---
+title: Sum Expression
+language: en
+sidebar_label: Sum Expression
+pagination_label: Sum Expression
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - sum expression
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+Sum(expression) is a common usage in SQL and is often needed by various data analysis scenarios.
+
+In previous versions, a table index or computed column was required for sum(expression) to work. Since v5, Kylin can answer some kinds of sum(expression) queries using the model.
+
+### How to Use
+
+This feature is off by default. To enable it, please set the configuration in `$KYLIN_HOME/conf/kylin.properties`.
+
+```properties
+kylin.query.convert-sum-expression-enabled=true
+```
+
+Currently, four kinds of sum (expression) usages are supported, namely
+
+- sum(case when)
+- sum(column*constant)
+- sum(constant)
+- sum(cast(case when))
+
+We will use the sample dataset to introduce the usage. Read more about the [Sample Dataset](../../../quickstart/sample_dataset.md).
+
+
+
+**sum(case when) function**
+
+For example:
+
+```sql
+select
+  sum(case when ORDERPRIOTITY='1-URGENT' then ORDTOTALPRICE else null end)
+from LINEORDER
+```
+
+In order to run this SQL, set your model as below in addition to enable sum(expression):
+
+- Define all columns in the `when` clause as dimensions, like the `ORDERPRIOTITY` in this example.
+- Define all columns in the `then` clause as Sum measure, like the `sum(ORDTOTALPRICE)` in this example.
+
+Then, the model will be able to run the above SQL.
+
+
+
+**sum(column*constant) function**
+
+For example:
+
+```sql
+select sum(ORDTOTALPRICE * 3) from LINEORDER
+```
+
+In order to run this SQL, set your model as below in addition to enable sum(expression):
+
+- Define the column in the `sum` function as Sum measure, like the `sum(ORDTOTALPRICE)` in this example.
+
+Then, the model will be able to run the above SQL.
+
+
+
+**sum(constant) function**
+
+For example:
+
+```sql
+select sum(3) from LINEORDER
+```
+
+In order to run this SQL, just enable the sum(expression) feature. No other setting on model is needed.
+
+**sum(cast(case when)) function**
+
+For example:
+
+```sql
+select sum(cast((case when ORDERPRIOTITY='1-URGENT' then ORDTOTALPRICE else null end) as bigint)) from LINEORDER
+```
+
+In order to run this SQL, set your model as below in addition to enable sum(expression):
+
+- Define all columns in the `when` clause as dimensions, like the `ORDERPRIOTITY` in this example.
+- Define all columns in the `then` clause as Sum measure, like the `sum(ORDTOTALPRICE)` in this example.
+
+Then, the model will be able to run the above SQL.
+
+
+
+### Known Limitation
+
+1. Due to the complexity of `null` value handling, `sum(column+column)` and `sum(column+constant)` are not supported yet. If you need to use the above syntax, please use a computed column or table index.
+2. In the current version, `topN` is not supported together with `sum(case when)`. `count(distinct)`, `collect_set`, and `percentile` can be used with `sum(case when)`, but they cannot be answered by a single index.
diff --git a/website/docs/modeling/model_design/measure_design/topn.md b/website/docs/modeling/model_design/measure_design/topn.md
new file mode 100644
index 0000000000..f9a4694092
--- /dev/null
+++ b/website/docs/modeling/model_design/measure_design/topn.md
@@ -0,0 +1,66 @@
+---
+title: Top-N (Approximate) (Beta)
+language: en
+sidebar_label: Top-N (Approximate) (Beta)
+pagination_label: Top-N (Approximate) (Beta)
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - top n
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+Finding the Top-N entities from a dataset is a frequent use case in data mining. We often read the reports or news titles like “Top 100 companies in the world”, “Most popular 20 electronics” and so forth. So exploring and analyzing the top entities are quite valuable and an essential part of many analyses.
+
+As both the raw dataset and the number of entities increase in the big data era, this need is getting stronger than ever before. Without pre-calculation, retrieving the Top-K entities among a distributed big dataset may take a long time, making the pushdown query inefficient.
+
+In v2.1 and higher, Apache Kylin introduces the “Top-N” measure, aiming to pre-calculate the top entities during the index build phase. In the query phase, Kylin can quickly fetch and return the top records. The performance will be much better than a model without “Top-N” and gives the user more power to analyze data.
+
+In the project of Kylin 5 the Top-N measure is customizable.
+
+> **Note**: this Top-N measure is an approximate realization, to use it properly you should have a solid understanding of the algorithm as well as the data distribution.
+
+
+
+### Top-N Query
+
+Let’s use the project created in the chapter [Tutorial](../../../quickstart/expert_mode_tutorial.md) as an example to introduce Top-N measure settings. This project uses the SSB Dataset and needs to complete the model design and index build (including data load). A model won't be able to serve any queries if it has no index and data. You can read [Model Design Basics](../data_modeling.md) to understand more about the methods used in model design. 
+
+We will use the fact table `SSB.P_LINEORDER`. This is a mockup of transactions that can happen in an online marketplace. It has a couple of dimension and measure columns. For easy understanding, we will only use four columns:  `LO_ORDERDATE`, `LO_SUPPKEY`, `LO_PARTKEY` and `LO_ORDTOTALPRICE`. The table below gives an introduction to these columns. 
+
+| Column           | Description                                    | Cardinality |
+| ---------------- | ---------------------------------------------- | ----------- |
+| LO_ORDERDATE     | Transaction Date                               | 2384        |
+| LO_SUPPKEY       | Supplier ID, 1 represents ‘Supplier#000000001’ | 20          |
+| LO_PARTKEY       | Part ID                                        | 2023        |
+| LO_ORDTOTALPRICE | Sold amount                                    | -           |
+
+*Method 1*: Oftentimes this e-commerce company needs to identify the Top-N (say top 100) in a given period for some suppliers. Please click **Query -> Insight** on the left navigation bar and enter the following query statements in the **SQL Editor**:
+
+```sql
+SELECT LO_PARTKEY, SUM(LO_ORDTOTALPRICE) AS TOTAL_AMOUNT
+FROM SSB.P_LINEORDER
+WHERE LO_ORDERDATE between '19930601' AND '19940601' 
+AND LO_SUPPKEY in (1) 
+group by LO_PARTKEY
+order by SUM(LO_ORDTOTALPRICE) DESC 
+limit 100
+```
+
+It returns multiple records. See below:
+
+![Query Result](images/topn_result.png)
+
+*Method 2*: In order to get the desired query performance on a massive dataset, we recommend creating a Top-N measure for the target column and pre-calculating it when building the index. Please add a measure in the model editing page as follows. Fill in the measure **Name** for example `TOTAL_AMOUNT`, select **Function** as **TOP_N**, select **Function Parameter** as **Top 100**, and finally select the target column from the dropdown list.
+
+![Add Top-N Measure](images/topN_en_measure_edit.jpg)
+
+Once the measure is added and the model is saved, you need to go to the **Edit Aggregate Index** page, add the corresponding dimensions and measures to the appropriate aggregate group according to your business scenario, and the new aggregate index will be generated after submission. In this example, the new index will contain the dimension `LO_ORDERDATE`, `LO_SUPPKEY`, `LO_PARTKEY` and the measure ` SUM(LO_ORDTOTALPRICE) `, you need to build index and load data to complete the precomput [...]
+
+
+
diff --git a/website/docs/modeling/model_design/precompute_join_relations.md b/website/docs/modeling/model_design/precompute_join_relations.md
new file mode 100644
index 0000000000..466c540c70
--- /dev/null
+++ b/website/docs/modeling/model_design/precompute_join_relations.md
@@ -0,0 +1,95 @@
+---
+title: Precompute the join relations
+language: en
+sidebar_label: Precompute the join relations
+pagination_label: Precompute the join relations
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - precompute the join relations
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+Pre-computation of the join relations refers to the process of expanding the joined tables of a model into a flat table based on mappings, and then building indexes based on the flat table. Kylin will precompute each join relation and generate a flat table that contains dimensions, measures and columns referenced by [computed columns](computed_column.md) by default. This article will cover the principles and features of **Precompute** **Join Relationships**.
+
+### Principles
+
+This article takes *Fact* as the fact table and *Dim* as the dimension table to introduce how **Precompute Join Relationships** will affect the generation of a flat table. Suppose the table structures and data are as follows: 
+
+- Table *Fact*
+
+| col1 | col2 | col3 |
+| ---- | ---- | ---- |
+| 1    | a    | AAA  |
+| 2    | b    | BBB  |
+| 3    | c    | CCC  |
+
+- Table *DIM*
+
+| col1 | col2 | col3 |
+| ---- | ---- | ---- |
+| 1    | A1   | AAAA |
+| 1    | A2   | BBBB |
+| 2    | B1   | CCCC |
+| 3    | C1   | DDDD |
+
+If *Fact* inner joins *Dim* and **Precompute Join Relationships** is enabled, it will generate a flat table as below:
+
+| Fact.col1 | Fact.col2 | Fact.col3 | Dim.col1 | Dim.col2 | Dim.col3 |
+| --------- | --------- | --------- | -------- | -------- | -------- |
+| 1         | a         | AAA       | 1        | A1       | AAAA     |
+| 1         | a         | AAA       | 1        | A2       | BBBB     |
+| 2         | b         | BBB       | 2        | B1       | CCCC     |
+| 3         | c         | CCC       | 3        | C1       | DDDD     |
+
+If *Fact* inner joins *Dim* and **Precompute Join Relationships** is disabled, the flat table generated will be: 
+
+| Fact.col1 | Fact.col2 | Fact.col3 |
+| --------- | --------- | --------- |
+| 1         | a         | AAA       |
+| 2         | b         | BBB       |
+| 3         | c         | CCC       |
+
+> [!NOTE]
+>
+> In this scenario, the generation of a flat table does not rely on the dimension table and it will be stored as a snapshot in Kylin during the building process.
+
+### Feature comparison 
+
+To strike the right balance between performance and cost, you can choose whether to enable **Precompute Join Relationships** based on your business needs and data characteristics when [designing a model](../manual_modeling.md). The following table compares the features of enabling and disabling **Precompute Join Relationships**. 
+
+| **Precompute Join Relationships** | **Query performance** | **Building duration** | **Storage costs** | **Adaptability to new query scenarios** | **Impact**                                                   |
+| -------------------------- | ------------ | ------------ | ------------ | -------------------- | ------------------------------------------------------------ |
+| Enable                            | Good                  | Longer                | Higher            | Fair                                    | ● All columns in dimension tables can be set as dimensions, or defined as measures or computed columns.  <br />|
+| Disable                           | Fair                  | Shorter               | Lower             | Good                                    | ● Columns in dimension tables cannot be set as dimensions, or defined as measures or computed columns, which means they cannot be referenced by indexes.<br />● Indexes and corresponding dimension snapshots will be hit by queries simultaneously, so users can get the query results through real-time join queries. <br />In snowflake models, if a  [...]
+
+### FAQ
+
+- Question: If **Precompute Join Relationships** is enabled in a model, what will happen if I disable it?
+
+  Answer: If **Precompute Join Relationships** is disabled, Kylin will automatically delete all related indexes, dimensions, measures, and computed columns. Please use caution when you perform this operation.  
+
+- Question: If the table relationship is one-to-many or many-to-many, is there anything I should be aware of before enabling **Precompute Join Relationships**? 
+
+  Answer: In such a scenario, derived dimension queries will be disabled. If columns of the joined tables are not set as dimensions, these columns will not be referenced when generating indexes, or aggregate indexes or table indexes to accelerate queries.     
+
+- Question: If a table is excluded, will it affect precomputing the join relations?
+
+  Answer: Even if **Precompute Join Relationships** is enabled, this table will not be used to generate a flat table or referenced when generating indexes.
+
+- Question: What's the difference between excluding tables and disabling **Precompute Join Relationships**? 
+
+  Answer: The table below summarizes the main differences. 
+
+| Category                              | Effective level | Applicable scenario                                          |
+| ------------------------------------- | --------------- | ------------------------------------------------------------ |
+| Exclude tables                        | Project-level   | Often used when returning the latest data for queries is required. The corresponding foreign keys of the join relations, instead of the columns of the excluded tables, will be solidified into the indexes. |
+| Disable Precompute Join Relationships | Model-level     | Often used to reduce storage costs and improve building efficiency, for example, in one-to-many or many-to-many relationships. |
+
+> [!NOTE]
+>
+> When designing a model, please do not use the columns of the excluded tables as dimensions, or else the index building job may fail. 
diff --git a/website/docs/modeling/model_design/slowly_changing_dimension.md b/website/docs/modeling/model_design/slowly_changing_dimension.md
new file mode 100644
index 0000000000..550ed30b6e
--- /dev/null
+++ b/website/docs/modeling/model_design/slowly_changing_dimension.md
@@ -0,0 +1,96 @@
+---
+title: Slowly Changing Dimension
+language: en
+sidebar_label: Slowly Changing Dimension
+pagination_label: Slowly Changing Dimension
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - slowly changing dimension
+draft: true
+last_update:
+    date: 08/19/2022
+---
+
+
+In most multi-dimensional OLAP scenarios, a lookup table might change unpredictably, rather than according to a regular schedule. For example, the product category of a specific product might be changed in the product table, or the segmentation of some customers might be changed in the customer table. As product category and customer segmentation are modeled as dimensions in a cube, they are so-called **Slowly Changing Dimensions**, or SCD for short. For a detailed introduction, refer to [wikipedia](https://en.wikipedia.org/wiki/Slowly_changing_dimension).
+
+Dealing with this issue involves SCD management methodologies referred to as Type 0 through 6. But the most commonly seen are **Type 1** and **Type 2**:
+
+- Type 1: overwrite. This methodology overwrites old with new data, and therefore does not track historical data. This is also called "latest status".
+
+- Type 2: add new row. This method tracks historical data by creating multiple records for a given natural key in the dimensional tables with separate surrogate keys and/or different version numbers. Unlimited history is preserved for each insert. This is also called "historical truth".
+
+For SCD Type 2 (subsequently referred to as "SCD2"), Kylin only supports models based on the History Table. The screenshot below illustrates the basics:
+
+![SCD2 Model](./images/scd2/model_SCD2_5x.png)
+
+### History Table
+
+The History Table stores the basic information of the record and the life cycle of each record. Changes to the record will add a new row and modify the life cycle of the historical record. Through the life cycle of the record, you can query historical records, and you can also query the latest records.
+
+For example, in the SCD2_SALES table below, the time interval of the salesperson in the corresponding business area (SALES_DPT) is [START_DATE,END_DATE).
+
+| SALES_PK | SALES_ID | SALES_NAME | SALES_DPT | START_DATE | END_DATE |
+| ---- | ---- | ---- | ---- | -------- | -------- |
+| 1    | 1    | Zhang San | South area | 1992/1/1 | 1993/1/1 |
+| 2    | 2    | Li Si | North area | 1992/1/1 | 1994/1/2 |
+| 3    | 3    | Wang Wu | East area | 1992/1/1 | 1995/1/3 |
+| 4    | 1    | Zhang San | North area | 1993/1/1 | 1994/1/1 |
+| 5    | 2    | Li Si | East area | 1994/1/2 | 9999/1/1 |
+| 6    | 3    | Wang Wu | South area | 1995/1/3 | 9999/1/1 |
+| 7    | 1    | Zhang San | West area | 1994/1/1 | 9999/1/1 |
+
+It can be seen from the table that Zhang San:
+
+- Worked in the south area from 1992/1/1 to 1993/1/1
+
+- Worked in the north area from 1993/1/1 to 1994/1/1
+
+- And he has been working in the West area since 1994/1/1
+
+Every time Zhang San changes his work location, the History Table adds a new row of records and modifies the END_DATE of the previous record.
+
+### Join condition based on History Table
+
+In order to be able to query the historical information of the History Table, the fact table is often used to filter the start and end dates of the History Table records, like `LO_ORDERDATE>=START_DATE AND LO_ORDERDATE<END_DATE`.
+As shown below:
+
+<img src="images/scd2/model_historical_dimension_table_scd2_join.en.png" alt="model_historical_dimension_table_scd2_join " style="zoom:50%;" />
+
+In order to use the History Table to meet the demand for slow dimensions, you can click **Setting -> Advanced Settings -> Support History Table** to turn on the function of supporting History Table. As shown below:
+
+![historical_dimension_table_switch](images/scd2/historical_dimension_table_switch.png)
+
+- **When it is turned on, non-equal join conditions (≥, <) can be used for modeling, building and queries.**
+
+- **When it is turned off, the old SCD2 model will be offline** 
+
+The current join conditions based on the History Table have the following restrictions:
+- **Can’t define multiple join conditions for the same columns**
+- **Join conditions ≥ and < must be used in pairs, and the same column must be joined in both conditions**
+- **Join condition for columns should include at least one equal-join condition (=)**
+- **Two tables could only be joined by the same condition for one time**
+- **Currently, recommendations are not supported for the History Table model**
+- **By default, even if you use LEFT JOIN, you need to exactly match the model before you can use the model that contains the History Table to answer queries.**
+
+### SCD2 model based on History Table
+
+For SCD2 model, the purpose of historical traceability can be achieved through the join conditions based on the History Table.
+As shown in the figure below, in order to query the total sales revenue of the seller at each work location, the order date is associated with the working time interval.
+![historical_dimension_table_scd2](images/scd2/model_historical_dimension_table_scd2.png)
+![historical_dimension_table_scd2 join](images/scd2/model_historical_dimension_table_scd2_join.png)
+
+For seller Zhang San, the total sales revenue of orders in different area from 1992 to the present can be queried, as shown in the following table:
+
+| D_YEAR | SALES_NAME | SALES_DPT | TOTAL_REVENUE |
+| ------ | ---------- | --------- | ------------- |
+| 1992   | Zhang San       | South area      | 3711706590    |
+| 1993   | Zhang San       | North area      | 3882401031    |
+| 1994   | Zhang San       | West area      | 3626302199    |
+| 1995   | Zhang San       | West area      | 3733096229    |
+| 1996   | Zhang San       | West area      | 3487903587    |
+| 1997   | Zhang San       | West area      | 3725031606    |
+| 1998   | Zhang San       | West area      | 2101112606    |
diff --git a/website/docs/modeling/model_design/table_index.md b/website/docs/modeling/model_design/table_index.md
new file mode 100644
index 0000000000..e4ab2d4206
--- /dev/null
+++ b/website/docs/modeling/model_design/table_index.md
@@ -0,0 +1,68 @@
+---
+title: Table Index
+language: en
+sidebar_label: Table Index
+pagination_label: Table Index
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+   - table index
+draft: true
+last_update:
+   date: 08/19/2022
+---
+
+
+Kylin provides table index function to support querying on raw data. To create a table index, similar to a table, you need to select fields that are often queried on the transaction level. For customized queries, this product leverages well-designed aggregate index to efficiently process them. For queries not within the scope, **Query Pushdown** plays a supplementary role. 
+
+
+### Create Table Index
+
+In the **Data Asset -> Model** page, click the model name, then click **Index** and click the **Table Index** button under **+ Index** in the **Index Overview** tab or the **+** (Add Table Index) button in the **Table Index** tab to enter the **Add Table Index** page. You can edit the table index in the pop-up window shown below and put the required columns in a table index.
+
+![Add Table Index](images/table_index_1.png)
+
+The fields are defined as follows:
+
+1. **Column Name**
+
+   All the columns defined as **dimension** or referenced by **measure** in the model.
+
+2. **Cardinality**
+
+   Cardinality of the column.
+
+3. **Order**
+
+  Reasonably setting a certain column as SortBy could substantially increase query efficiency of table index. When you select the column ABC, you can click the arrows in **Order** to adjust the column order.
+
+  The top columns in the SortBy list will provide greater efficiency benefits when they are used as a filter condition in queries. We suggest you arrange SortBy columns in order of the frequency with which they will be used as filter conditions. Do not set too many SortBy columns, because tail columns do little to help performance.
+
+4. **ShardBy**
+
+   If you want to set a column as the ShardBy column, select it and click the icon below the **ShardBy**.
+
+   When configuring a table index, you can set one column as ShardBy, or none at all. If you set a certain column as the ShardBy column, the raw data will be sharded according to the values of that column. If you don't explicitly specify a ShardBy column, sharding will be done taking all the column values into consideration.
+   
+   Select an appropriate ShardBy column could distribute raw data into multiple shards, which can increase concurrency, and achieve a better query performance. We suggest you choose columns with relatively large cardinality as the ShardBy column to prevent heterogeneous data distribution. 
+   
+
+You can add multiple table indices. After building the table index and loading data, you can query for corresponding raw data.
+
+
+
+### View Table Index
+
+In the **Data Asset -> Model** page, click the model name and click **Index**. Under the **Index Overview** tab, you can view the details of table index.
+
+You can click the icon beside the status and see the details in View Index Details, such as the columns in the table index, the order, and the shardby column. For the custom table index, you can click the edit button to edit it, or click the delete button to delete it. You can also delete the recommended table index, but can not edit it.
+
+If you need to add base indexes when there is no base index or when a base index is missing, click **+ Index** and add the base indexes in the drop-down box.
+
+
+![View Table Index](images/index_1.png)
+
+
+For more information about the Index List, such as the index status, please refer to the [Aggregate Index](aggregation_group.md) section.
diff --git a/website/docs/monitor/images/job_diagnosis_web.png b/website/docs/monitor/images/job_diagnosis_web.png
new file mode 100644
index 0000000000..5908b70e38
Binary files /dev/null and b/website/docs/monitor/images/job_diagnosis_web.png differ
diff --git a/website/docs/monitor/images/job_id.png b/website/docs/monitor/images/job_id.png
new file mode 100644
index 0000000000..9ba244edc2
Binary files /dev/null and b/website/docs/monitor/images/job_id.png differ
diff --git a/website/docs/monitor/images/job_log.png b/website/docs/monitor/images/job_log.png
new file mode 100644
index 0000000000..2042902134
Binary files /dev/null and b/website/docs/monitor/images/job_log.png differ
diff --git a/website/docs/monitor/images/job_settings.png b/website/docs/monitor/images/job_settings.png
new file mode 100644
index 0000000000..0c6a25b0e2
Binary files /dev/null and b/website/docs/monitor/images/job_settings.png differ
diff --git a/website/docs/monitor/images/job_status.png b/website/docs/monitor/images/job_status.png
new file mode 100644
index 0000000000..22f291d21a
Binary files /dev/null and b/website/docs/monitor/images/job_status.png differ
diff --git a/website/docs/monitor/images/job_type.png b/website/docs/monitor/images/job_type.png
new file mode 100644
index 0000000000..4eece63204
Binary files /dev/null and b/website/docs/monitor/images/job_type.png differ
diff --git a/website/docs/monitor/intro.md b/website/docs/monitor/intro.md
new file mode 100644
index 0000000000..f3a25713f5
--- /dev/null
+++ b/website/docs/monitor/intro.md
@@ -0,0 +1,31 @@
+---
+title: Monitor Job
+language: en
+sidebar_label: Monitor Job
+pagination_label: Monitor Job
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - monitor job
+draft: false 
+last_update:
+    date: 08/19/2022
+---
+
+
+## Monitor Job
+
+
+Kylin introduces a **job monitor** module that allows users to view relevant information and execute jobs on the list.
+
+The job is built in the process of using Kylin, such as building an index and refreshing the source table data, etc.
+
+For a clearer understanding of how to trigger a job, we recommend reading the following sections before continuing with this chapter. 
+
+- [Data Source](../datasource/intro.md)
+- [SQL Query](../query/intro.md)
+- [Model (Index Group)](../modeling/intro.md)
+- [Snapshot Management](../snapshot/intro.md)
+
diff --git a/website/docs/monitor/job_concept_settings.md b/website/docs/monitor/job_concept_settings.md
new file mode 100644
index 0000000000..2339e26707
--- /dev/null
+++ b/website/docs/monitor/job_concept_settings.md
@@ -0,0 +1,148 @@
+---
+title: Job Concept and Settings
+language: en
+sidebar_label: Job Concept and Settings
+pagination_label: Job Concept and Settings
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+   - job concept and settings
+draft: false
+last_update:
+   date: 08/19/2022
+---
+
+
+You will build different jobs as you work with Kylin. In this section, we will introduce the types and settings of jobs. The main contents are as follows:
+
+
+### <span id="job_type">Types of Jobs</span>
+
+Kylin has these types of jobs:
+
+- Build Index: Job for building new Index.
+- Load Data: Job for incrementally loading data on existing models/indices.
+- Merge Data: Job for merging segments.
+- Refresh Data: Job for refreshing segments.
+- Sample Table: Job for table sampling.
+- Build Snapshot: Job for building new Snapshots.
+- Refresh Snapshot: Job for refreshing snapshots.
+- Build Sub-partitions Data: Job for loading sub-partition data of Multi-level partition model.
+- Refresh Sub-partitions Data: Job for refreshing sub-partition data of Multi-level partition model.
+- Load Data to Tiered Storage: Job for loading data on existing models to the tiered storage.
+- Delete Tiered Storage - Project: Job for deleting the loaded data of projects from the tiered storage.
+- Delete Tiered Storage - Model: Job for deleting the loaded data of models from the tiered storage.
+- Delete Tiered Storage - Segment: Job for deleting the loaded data of segments from the tiered storage.
+- Delete Tiered Storage - Index: Job for deleting the loaded data of base table index from the tiered storage.
+
+You can view the job details in the navigation bar **Monitor -> Job**. As shown below, We have created various kinds of jobs.
+
+![Job List](images/job_type.png)
+
+1. **Build Index**: Job for building new index and loading data.
+
+   - In the navigation bar **Data Asset -> Model**, creating new models will trigger this job.
+
+     > **Tips**: For details on how to build an index, please see [Aggregate Index](../modeling/model_design/aggregation_group.md).
+
+2. **Load Data**: Job for incrementally loading data on existing models/indices.
+
+   > **Attention**: The start time of loading data must be greater than the end time of the loaded data.
+
+   - In AI augmented mode, in the navigation bar **Data Asset -> Model**, loading data within a time range of the model will trigger the job.
+
+3. **Merge Data**: When the system detects a sufficient number of segments, it will automatically trigger the job of merging data. You can set the parameters of merging segments in the navigation bar **Setting -> Segment Settings**. For detailed message, you can refer to [Segment Operation and Settings](../modeling/load_data/segment_operation_settings/intro.md).
+
+4. **Refresh Data**: Job for refreshing segments.
+
+   - In AI augmented mode, in the navigation bar **Data Asset -> Model**, refreshing data of a specified segment in the specified model will trigger the job.
+
+     > **Attention**:If you refresh n segments at the same time, it will trigger n jobs to refresh the data, and arrange them in the job queue according to the chronological order of original segments. You can view them in the **Monitor -> Job** bar.
+     
+   
+5. **Sample Table**: Job for data sampling of a table. This job can obtain characteristics of the table data. Table sampling jobs can be triggered automatically or manually.
+
+   - Automatically: The job is automatically triggered when you add a data source in the navigation bar **Data Asset -> Data Source**.
+
+     > **Attention**: Table sampling is enabled by default. If you manually turn it off, this job will not be triggered.
+
+   - Manually: You can trigger a table sampling job in the navigation bar **Data Asset -> Data Source**. Click on the **Sample** button or **Reload** button to trigger this kind of jobs.
+
+     > **Attention**:The "Reload" button will reload the data from the table.
+   
+6. **Build Snapshot**:Job for building new Snapshots. This job only appears when you manually add a snapshot after snapshot management is enabled.
+
+7. **Refresh Snapshot**:Job for refreshing snapshots. This job only appears when you manually refresh a snapshot after snapshot management is enabled.
+
+8. **Build Sub-partitions Data**: Job for loading sub-partition data when multi-level partitioning is enabled and the model is a multi-level partition model.
+
+9. **Refresh Sub-partitions Data**: Job for refreshing sub-partition data when multi-level partitioning is enabled and the model is a multi-level partition model.
+
+### <span id="job_details">Details of Jobs</span>
+
+In the navigation bar **Monitor -> Job**, click the triangle button on the left to expand and view the job details. 
+Some of the elements include job steps, waiting time and executing time, log output and job parameters, etc:
+
+1. **Job Steps**:
+   According to job type, the job is subdivided into the first and second level job steps, so that users can better understand the job execution.
+   
+   Take the job of building index and loading data type as an example, the first-level job steps are:
+   - Detect Resource
+   - Load Data to Index
+   - Update Metadata
+   - Upload Data to Tiered Storage
+
+   The **Load Data to Index** step is subdivided into second-level job steps:
+   - Waiting for yarn resources
+   - Build or refresh snapshot
+   - Materialize fact table view
+   - Generate global dictionary
+   - Generate flat table
+   - Get flat table statistics
+   - Build indexes by layer
+   - Update flat table statistics
+
+   Note: Depending on the actual situation, the job may only perform some of the above steps.
+2. **Waiting time and Executing time**:
+
+   Job waiting time is the waiting time due to concurrency restrictions or resource restrictions.
+
+   Job executing time is the actual execution time of the job, excluding the job suspension time.
+
+3. **Log output**: The job-related logs in `kylin.log`; they can assist in the diagnosis of job abnormalities.
+4. **Job parameters**: Spark parameters related to the job; they can assist in the diagnosis of job abnormalities.
+> Tip: Subdivided second-level job steps and job parameters have been introduced since Kylin version 4.5.3.
+
+### <span id="job_settings">Settings of Jobs</span>
+
+You can modify settings about **Email Notification** in the navigation bar **Setting -> Advanced Settings**, as shown below: 
+
+![Job Notification](./images/job_settings.png)
+
+You can fill in your email and choose to open different types of job notification.
+
+> **Tips**: You can make different job notifications for different projects.
+
+### <span id="faq">FAQ</span>
+
+**Q: Why is my job suspended without error?**
+
+We set the priority of jobs according to the influence of different types of jobs on the actual business, and the details is as follows:
+
+- High priority job: Loading Data
+- Secondary priority job: Building Index, Merging Data, Refreshing Data
+
+When a job with a secondary priority reports an error, other jobs with the same priority of this model/index will be suspended. However, jobs of different models/indices will not be affected.
+
+**Q: Why did my previously completed jobs disappear from the job list?**
+
+Up to 30 days of job records are kept in the Kylin. Job records more than 30 days can be queried in the deployment file of the installation package.
+
+**Q: Is there a limit on the number of concurrent jobs for Kylin? What should I do if I exceed the number of concurrency allowed by the system when submitting a job?**
+
+By default, Kylin automatically controls the number of concurrency based on the system resources available. You can turn it off by modifying the parameter `kylin.job.auto-set-concurrent-jobs` in the system configuration file `kylin.properties`.
+
+When auto-control is turned off, the maximum concurrency in a single project is **20** by default, which can be changed by modifying the parameter `kylin.job.max-concurrent-jobs` in the system configuration file `kylin.properties`.
+
+When submitting a new job, if the number of concurrency exceeds what is allowed, this job will enter the job queue. When a running job is finished, Kylin will schedule a job in the queue to execute in a first-in-first-out (FIFO) manner.
diff --git a/website/docs/monitor/job_diagnosis.md b/website/docs/monitor/job_diagnosis.md
new file mode 100644
index 0000000000..b09b289928
--- /dev/null
+++ b/website/docs/monitor/job_diagnosis.md
@@ -0,0 +1,70 @@
+---
+title: Job Diagnosis
+language: en
+sidebar_label: Job Diagnosis
+pagination_label: Job Diagnosis
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - job diagnosis
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+
+## Job Diagnosis
+
+ Jobs may encounter various problems during execution. To help solve these problems efficiently, Kylin provides a job diagnostic function, which can package the related log information into a compressed package for operations staff or technical support to analyze problems and ascertain the cause.
+
+### View Job Execution Log On Web UI
+
+You can view the job execution log in the ` Monitor -> Batch Job/Streaming Job` interface. As shown below, you can click the **Open Job Steps** button at the position 1, and then click the **Log Output** button in the job details to view the first and the last 100 lines of job execution log in a popup window. You can download the full log by clicking the link **download the full log** at the position 3.
+
+> **Tip**: If there are multiple steps in a job, you can view the execution log for each step.
+
+![Job Log](images/job_log.png)
+
+### Generate Job Diagnostic Package By Scripts
+
+In FusionInsight, you need to execute the command `source /opt/hadoopclient/bigdata_env` first. The `hadoopclient` is a variable.
+
+You can execute ` $KYLIN_HOME/bin/diag.sh -job <jobid> ` to generate the job diagnostic package and \<jobid\> need to be replaced with the actual job ID number. You can view the job ID number in the ` Monitor -> Batch Job/Streaming Job` interface. You can also click the icon in the position 1 as shown picture below to expand the specified job details that is in the position 2 on the right.
+
+ ![Job ID](images/job_id.png)
+
+ The diagnostic package is stored by default in the `$KYLIN_HOME/diag_dump/` directory.
+
+ After extracting the diagnostic package, you can view the diagnostic package information in the appropriate directory or file.
+
+- `/conf`: configuration information under the `$KYLIN_HOME/conf` directory.
+- `/hadoop_conf`: configuration information under the `$KYLIN_HOME/hadoop_conf` directory.
+- `/metadata`: metadata files.
+- `/logs`: specifies the logs generated during the execution of job.
+- `/spark_logs`: specifies all spark executor logs generated during job execution.
+- `/system_metrics`: specifies the system metrics during the execution of job.
+- `/audit_log`: specifies the audit logs during the execution of job.
+- `/job_tmp`: specifies the temporary files, logs and optimization suggestions log of job.
+- `/yarn_application_log`: specifies the logs of yarn application of job. 
+- `/client`: operating system resources occupied information, hadoop version and kerberos information.
+- `/monitor_metrics`: the node monitoring log of the specified job.
+- `/write_hadoop_conf`: `$KYLIN_HOME/write_hadoop_conf`, Hadoop configuration of the build cluster. This directory will not be available when Read/Write separation deployment is not configured.
+- file `catalog_info`: directory structure of install package.
+- file `commit_SHA1`: git-commit version.
+- file `hadoop_env`: hadoop environment information.
+- file `info`: license, package and hostname.
+- file `kylin_env`: Kylin version, operating system information, Java related information, git-commit information.
+
+> **Tips**: If you want to exclude metadata files, please specify `-includeMeta false`.
+
+### Generate Job Diagnostic Package in Web UI
+
+Job diagnostic package includes all diagnostic information of a specific job, and users can generate job diagnostic package on Web UI by following the following steps:
+
+![Generate Job Diagnostic Package in Web UI](images/job_diagnosis_web.png)
+
+1. In the action bar of a job on the **Jobs List** page, click the **Download Job Diagnostic Package** button in **Actions**.
+2. Select **Server**.
+3. Click **Generate and Download** button: After the diagnostic package is generated, the downloads will be triggered automatically. If the diagnostic package generation fails, you can view the details of the failure on the interface.
diff --git a/website/docs/monitor/job_exception_resolve.md b/website/docs/monitor/job_exception_resolve.md
new file mode 100644
index 0000000000..46faa9e45f
--- /dev/null
+++ b/website/docs/monitor/job_exception_resolve.md
@@ -0,0 +1,89 @@
+---
+title: Common Job Error Causes and Solutions
+language: en
+sidebar_label: Common Job Error Causes and Solutions
+pagination_label: Common Job Error Causes and Solutions
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - common job error causes and solutions
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+
+Various problems may occur during the execution of building jobs which cause the job to fail. Usually, the brief error cause and description are directly displayed in the job details. This article will summarize some common error causes and solutions to assist solving problems.
+
+#### <span id="date_format_not_match">Incrementally build model, the time format of the time partition column is wrong</span>
+
+- **ErrorCode:** KE-030001003
+- **Description:**
+
+  In the time partition setting of the model, the time format of the time partition column is inconsistent with the actual time format in the data source. The key information in the log is: `date format not match`.
+
+- **Solution:**
+
+    1. Modify the time format of the model time partition column to be consistent with the actual time format in the data source:
+
+       Please refer to [Design a Data Model](../modeling/model_design/data_modeling.md#design) *Step 7. Set Time Partition Column and Data Filter Condition* to modify the time format of the model time partition column.
+
+    2. If you insist on using this format, you can choose to disable checking the time partition column by modifying the system parameter in `kylin.properties` to `kylin.engine.check-partition-col-enabled=false`.
+       Notice: Although this method can bypass the time format verification here, it may cause other problems. Please use it with caution.
+
+#### <span id="oom">OOM exception occurred during building</span>
+
+- **ErrorCode:** KE-030001004
+- **Description:** Spark Driver/Executor has OOM during building. The key information in the log is: `OutOfMemoryError`.
+- **Solution:**
+
+    1. Adjust spark.sql.shuffle.partitions
+
+        During the build process, if there are MetadataFetchFailedException, executor lost, oom problems, you can try to adjust the following parameters:
+        - kylin.engine.spark-conf.spark.sql.shuffle.partitions
+
+       This parameter determines the number of partitions during aggregate or join execution, and the default is 200.
+
+    2. Improve build resources
+
+       In general, using more resources can significantly improve performance and fault tolerance by tuning cores and memory used by builds with the following parameter :
+
+        - kylin.engine.spark-conf.spark.executor.instances
+        - kylin.engine.spark-conf.spark.executor.cores
+        - kylin.engine.spark-conf.spark.executor.memory
+        - kylin.engine.spark-conf.spark.executor.memoryOverhead
+
+#### <span id="no_space_left_on_device">No space left on device during building</span>
+
+- **ErrorCode:** KE-030001005
+- **Description:** The building job reports an error: no space left on device. The key information in the log is: `No space left on device`.
+- **Solution:**
+
+    1. Please check Kylin and the cluster disk space used for building, clean up invalid files or expand capacity in time.
+    2. Try to clean up Kylin's inefficient storage; please refer to [Storage Quota](../operations/project-operation/toolbar.md#storage_quota).
+    3. For `shuffle no left space on device` problem, you can appropriately increase the number of executor instances to use more computing resources.
+
+        - spark.executor.cores
+        - spark.executor.instances
+
+#### <span id="class_not_found">Class not found when building</span>
+
+- **ErrorCode:** KE-030001006
+- **Description:** The class was not found during building. The key information in the log is: `ClassNotFoundException`.
+- **Solution:**
+
+    1. Missing Mysql driver(`java.lang.ClassNotFoundException: com.mysql.jdbc.Driver`)
+
+       Please refer to [Use MySQL as Metastore](../deployment/on-premises/rdbms_metastore/mysql/mysql_metastore.md) set up Mysql as metabase.
+
+#### <span id="kerberos_realm_not_found">Kerberos realm not found when building</span>
+
+- **ErrorCode:** KE-030001007
+- **Description:** Kerberos is not configured correctly, resulting Kerberos realm being not found. The key information in the log is: `Can't get Kerberos realm`.
+- **Solution:**
+
+    1. Double check Kerberos configuration
+       1. For both Yarn Cluster and Yarn Client modes, the krb5.conf file in {KYLIN_HOME}/conf/ and {KYLIN_HOME}/hadoop_conf/ should be checked to prevent any failure related to Kerberos realm.
+       2. If Yarn Cluster mode is chosen, please pay more attention to the Kerberos config in {KYLIN_HOME}/spark/conf/spark-env.sh file.
diff --git a/website/docs/monitor/job_operations.md b/website/docs/monitor/job_operations.md
new file mode 100644
index 0000000000..1e45fe6c22
--- /dev/null
+++ b/website/docs/monitor/job_operations.md
@@ -0,0 +1,72 @@
+---
+title: Job Operations
+language: en
+sidebar_label: Job Operations
+pagination_label: Job Operations
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - Job Operations
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+You can perform job operations on Kylin's job monitor page. In this section we will tell you about the status and meaning of jobs. The main contents are as follows:
+
+
+### <span id="status">Job Status</span>
+
+The job has the following 6 states:
+
+- **PENDING**: The job is waiting to be scheduled for execution.
+
+- **RUNNING**: The job is running normally. You can view the progress of the execution, shown as a percentage.
+
+- **PAUSED**: The job's normal execution has been suspended.
+
+- **ERROR**: The job encountered a problem that prevents it from continuing, and the interface displays the error status.
+
+- **DISCARDED**: The job has been terminated. A terminated job will immediately stop and release all its resources.
+
+  > Note: When the job's execution object no longer exists or has changed, the system will automatically terminate the job.
+
+- **FINISHED**: The job completed normally.
+
+You can view the job status information in the **Monitor -> Job** interface of navigation bar. As shown below:
+
+![Job Status](./images/job_status.png)
+
+- Label 1: Execution status.
+- Label 2: Pause status.
+- Label 3: Finished status.
+- Label 4: Error Status.
+- Label 5: Termination Status.
+- Label 6: Batch operation for selected jobs.
+- Label 7: Operation for a single job.
+
+### <span id="operation">Routine Operation</span>
+
+- **Resume**:Start with an intermediate step in the job and continue with the job.
+
+  > Note: If a job is in error status, the user can troubleshoot and solve the problem, then retry the execution through this operation.
+
+- **Restart**:Abandon the results of the intermediate steps and re-execute jobs from the beginning.
+
+  > Note:For jobs in error status, if the execution subjects have changed, for instance, the schema of a source table has changed, we'll recommend user to restart the job. And records of jobs before this time will be removed and restart a new job.
+
+- **Pause**:Pause the current job and release all related resources.
+
+- **Discard**: Discard jobs and release all related resources.
+
+  > Note: After the jobs are discarded, the action cannot be undone, and the jobs cannot be restored by the restart operation.
+
+- **Delete**:Delete jobs.
+
+- **Refresh** :Refresh job list information.
+
+### <span id="admin">Cross-Project Job Operation</span>
+
+In the job monitor page, ADMIN users can view all job information via **Select All** option in the project list. After selecting that, the **Project** column will appear in the job list and you can operate the jobs in batches across projects.
diff --git a/website/docs/operations/project-operation/images/service_state.png b/website/docs/operations/project-operation/images/service_state.png
new file mode 100644
index 0000000000..69dd4e44b1
Binary files /dev/null and b/website/docs/operations/project-operation/images/service_state.png differ
diff --git a/website/docs/operations/project-operation/images/storage_quota.png b/website/docs/operations/project-operation/images/storage_quota.png
new file mode 100644
index 0000000000..40953f6ebc
Binary files /dev/null and b/website/docs/operations/project-operation/images/storage_quota.png differ
diff --git a/website/docs/operations/project-operation/images/toolbar.en.png b/website/docs/operations/project-operation/images/toolbar.png
similarity index 100%
rename from website/docs/operations/project-operation/images/toolbar.en.png
rename to website/docs/operations/project-operation/images/toolbar.png
diff --git a/website/docs/operations/project-operation/toolbar.md b/website/docs/operations/project-operation/toolbar.md
new file mode 100644
index 0000000000..b096da99bb
--- /dev/null
+++ b/website/docs/operations/project-operation/toolbar.md
@@ -0,0 +1,55 @@
+---
+title: Tool Bar
+language: en
+sidebar_label: Tool Bar
+pagination_label: Tool Bar
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - toolbar
+draft: true
+last_update:
+    date: Aug 19, 2022
+---
+
+The toolbar of Kylin system is as shown in the following figure.
+
+![Toolbar](images/toolbar.png)
+
+
+### <span id="project_list">Project List</span>
+
+The project list is located on the left side of the toolbar, with the current project selected. If it is the first time Kylin is started, the current project will be empty. The icon on the left side of this list is used to expand and collapse the navigation bar. The **+** button on the right side is used to add a new project. The newly added project will be set as the current project.
+
+### <span id="storage_quota">Storage Quota</span>
+
+Click **Storage Quota** for an overview of quota usage.
+
+![Storage Quota](images/storage_quota.png)
+
+- **Used Storage**: The percentage of storage quota consumed for this project. The values enclosed in parentheses represent the storage quota consumed and total storage quota allocated. By default, the total storage quota is 10 TB. To adjust the default value, see [Project Settings](project_settings.md).
+
+  The stored data mainly consists of model and index data, build job outputs, and dictionary files, etc. For more information, see [Capacity Billing](../capacity_billing.en.md).
+
+- **Low Usage Storage**: Indexes that are less frequently queried within the set time window. By default, indexes that are queried less than 5 times in one month would be considered as low usage storage. To modify this rule, see [Project Settings](project_settings.md).
+
+  > [!NOTE]
+  >
+  > You can manually clean low usage storage (with [Tutorial](../../quickstart/expert_mode_tutorial.md) enabled) or set scheduled cleaning task. For more information, see [Junk File Clean](../system-operation/junk_file_clean.md). 
+
+### <span id="service_state">Service State</span>
+
+The colored dot indicates the status of the service for the current project. Clicking on it will open a dialog shown as follows. The first line in the figure shows the current time of the system. The second line shows the percentage of data used. The third line shows the currently used node information, and the `>` icon on the right can be expanded to view the detailed node information.
+
+![Service State](images/service_state.png)
+
+
+### <span id="system_management">System Management</span>
+
+After the system admin logs into Kylin, click **Admin** on the top bar to enter the administration mode. For details, please refer to the [Project Management](project_management.md) chapter.
+
+### <span id="user_info">User Information</span>
+
+The current login user information is displayed on the far right. Click to modify the password and log out.
diff --git a/website/docs/query/insight/intro.md b/website/docs/query/insight/intro.md
index c63bab0612..29386b0bbb 100644
--- a/website/docs/query/insight/intro.md
+++ b/website/docs/query/insight/intro.md
@@ -43,5 +43,5 @@ Different from regular query engines, kylin uses precalculated results to replac
 
    The query will fail with an error message of `no model found` or `no realization found`. This means the data required for this query does not exist in the system.
 
-   As a special case, if the pushdown engine is enabled, then kylin will not report error, and instead route this query to the pushdown engine. For more details, please refer to [Query Pushdown](../../../Designers-Guide/query/pushdown/README.md).
+   As a special case, if the pushdown engine is enabled, then kylin will not report error, and instead route this query to the pushdown engine. For more details, please refer to [Query Pushdown](../../query/pushdown/intro.md).
 
diff --git a/website/docs/query/optimization/query_enhanced.md b/website/docs/query/optimization/query_enhanced.md
index ee1c3a2edd..5c99587daf 100644
--- a/website/docs/query/optimization/query_enhanced.md
+++ b/website/docs/query/optimization/query_enhanced.md
@@ -14,8 +14,6 @@ last_update:
     date: 08/17/2022
 ---
 
-## Use the Left Join model to answer Inner Join queries with equivalent semantics
-
 By default in Kylin, the relationship between tables in the query SQL must be consistent with the relationship between the fact tables and dimension tables defined in the model, that is, the model of `Left Join` cannot answer the query of `Inner Join`.
 
 But in some cases,  part of `Left Join` queries can be semantically equivalently transformed into `Inner Join` queries, so we provide configuration parameters that allow users to use `Left Join`  model to answer equivalent semantics `Inner Join` query.
diff --git a/website/docs/query/optimization/segment_pruning.md b/website/docs/query/optimization/segment_pruning.md
index 13e2cc53c2..5fa0ff0667 100644
--- a/website/docs/query/optimization/segment_pruning.md
+++ b/website/docs/query/optimization/segment_pruning.md
@@ -14,9 +14,6 @@ last_update:
     date: 08/17/2022
 ---
 
-
-## Segment Pruning When Querying
-
 Starting from Kylin 5.0, we support the calculation of the dimension value range (maximum and minimum) of all dimensions when building the Segment, so we can prune segment during queries, reducing the scanning range of the segment to optimize some query performance.
 
 
diff --git a/website/docs/query/pushdown/intro.md b/website/docs/query/pushdown/intro.md
index 2bf015e701..b66f684607 100644
--- a/website/docs/query/pushdown/intro.md
+++ b/website/docs/query/pushdown/intro.md
@@ -14,8 +14,6 @@ last_update:
     date: 08/17/2022
 ---
 
-## Query Pushdown
-
 Kylin integrates a Smart Pushdown engine which works SQL on Hadoop engine like SparkSQL. 
 
 For queries which cannot be answered by Kylin, they can be routed into Pushdown Query Engine when necessary.
diff --git a/website/docs/query/pushdown/pushdown_to_embedded_spark.md b/website/docs/query/pushdown/pushdown_to_embedded_spark.md
index 2727db47fc..22453ab46f 100644
--- a/website/docs/query/pushdown/pushdown_to_embedded_spark.md
+++ b/website/docs/query/pushdown/pushdown_to_embedded_spark.md
@@ -15,8 +15,6 @@ last_update:
 ---
 
 
-## Pushdown to Embedded SparkSQL
-
 Kylin uses pre-calculation instead of online calculation to achieve sub-second query latency on big data. In general, the model with pre-calculated data is able to serve the most frequently-used queries. But if a query is beyond the model's definition, the system will route it to the Kyligence smart pushdown engine. The embedded pushdown engine is Spark SQL.
 
 > **Note**: In order to ensure data consistency, query cache is not available in pushdown.
diff --git a/website/docs/quickstart/expert_mode_tutorial.md b/website/docs/quickstart/expert_mode_tutorial.md
new file mode 100644
index 0000000000..ddc66c9524
--- /dev/null
+++ b/website/docs/quickstart/expert_mode_tutorial.md
@@ -0,0 +1,186 @@
+---
+title: Tutorial
+language: en
+sidebar_label: Tutorial
+pagination_label: Tutorial
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+      - tutorial
+draft: true
+last_update:
+      date: 08/12/2022
+---
+
+
+## Tutorial
+
+Kylin 5.0 provides a modeling process driven by the user. In this section, we will show you how to create and optimize models.
+
+### <span id="preparation">Preparation</span>
+
+1. Kylin 5.0 provides one mode: You are able to design your own models to fulfill your analysis demands and load data for your models. You can design the index manually and let the system continue to improve the index according to your query habits and data characteristics.
+	
+2. We will use the SSB (Star Schema Benchmark) sample data to introduce the AI augmented mode project. You can find out how to import the sample data in the [Import Data from Hive](../datasource/import_hive.md) section.
+
+
+### <span id="project">Create Project</span>
+
+Project is the primary management unit of Kylin. In a project, you can design multiple models and perform query analysis.
+
+In Kylin toolbar, click the **+** (Add Project) button to the right of the item list to add a project, and fill in the project name and  description. The project name is mandatory; the project description is optional. A good project description will help with the maintenance of the project in the future.
+
+At this point, you have completed the creation project. The interface stays in the **Data Asset -> Data Source** page, ready to add data sources for the next step.
+
+
+### <span id="metadata">Add Data Source</span>
+
+Once the project is created, you need to add a data source table to the project. The data source tables added here will be used during model creation stage and/or query analysis.
+
+When you add a data source, the metadata of the source table is synchronized. The metadata of a table is the data that describes the characteristics of the table e.g. table names, column names, types etc.
+
+1. **Import Table Metadata**
+
+   In the **Data Asset -> Data Source** interface, click the **Add data source** button at the top left to add a data source table for your project.
+
+   - Select data source type: Hive.
+
+   - Select the target data source table: Expand the database list and select the target data source table.
+
+   For more information on data source operations, please see the [Data Source](../datasource/intro.md) section.
+
+2. **Table Sampling**
+
+   During the table metadata synchronization process, data sampling is turned on by default. You can view the auto-launched **sample table data** job in the **Monitor -> Job** page. Once the job has been executed, you can view the sample data from the source table in the **Data Asset -> Data Source** interface. You can find out more in the [Data Sampling](../datasource/data_sampling.md) section.
+
+    In general, table sampling will answer questions like those listed below. Understanding these will help with the subsequent model design.
+
+   - How many rows are there in the table?
+   - What is the cardinality of each column? That is, the amount of data that is not repeated.
+   - What are the characteristics of the column values for each column?
+
+
+3. **Data Source Interface**
+
+   As shown in the following diagram, we added all the tables in the sample SSB dataset in Hive. The data source area is on the left and the information of the specified source table is on the right.
+
+   You can view the source table information on the right side, where **all columns** are the feature information of the source table field, **sampled data** shows the data of each column in the source table.
+
+   ![Data Source](images/datasource.png)
+
+
+### <span id="model">Create Model</span>
+
+Model design is one of the core functions of Kylin. Good model design can help achieve a better data analysis experience.
+
+1. **Principles of Model Design**
+
+   The model is the semantic layer. A good model can help users visualize the business relationships between the source tables.
+
+   In Kylin, you can view the data source tables in a single panel, complete model design, add dimensions and metrics, and design a model that fits your business logic. Basic principles in a model design:
+
+   - Fact Table: Generally a table with quantifiable measures. For example, the order table is a suitable fact table. There are columns like the order quantity, order amount can be quantified.
+   - Dimension Table: A table that represents a perspective looking into the quantifiable measures. For example, the product information table is suitable as a dimension table, and there are product categories and product trademarks, which can be angles to analyze business. Date tables are often used as dimension tables to partition business data by day/week/month/quarter/year.
+   - Dimensions: Represents a business angle that can be analyzed, such as the order date indicating the time dimension and the item ID indicating the product dimension.
+   - Metrics: Quantifiable numerical information such as total sales count, total sales amount, etc. Usually quantifiable columns are used with functions such as SUM, COUNT, TOP_N, and so on.
+
+2. **Method of Model Design**
+
+   Please create a model in the **Data Asset -> Model** interface and enter the model editing interface to visually complete the creation of the multidimensional model. The specific method of model design will be described in detail in the [Model Design Basics](../modeling/model_design/data_modeling.md) section. Here is a brief introduction to the following steps:
+
+   - Design Model: Select the source table according to the business logic and set the association between the tables. Then set the fact table and dimension tables.
+   - Add Dimensions: Identify dimensions from table columns for business analysis.
+   - Add Measures: Identify metrics and their aggregate functions for business analytics. You can see the detailed methods in the [Measures](../modeling/model_design/measure_design/intro.md) section.
+
+   As shown in the following diagram, we built the model using the source tables in the SSB dataset.
+
+   ![Model Design](images/model.png)
+
+
+
+### <span id="index">Design Index</span>
+
+After the model is created, you need to define the index in the model; this should be based on the business analysis you are interested in. Good index design can improve system efficiency and save storage space. When you save a model design, you will often be reminded to add an index.
+
+1. **Principles of designing an index**
+
+   Not every dimension combination is needed for business analysis. In this case, pre-calculating all the dimension combinations will bring a large workload and can result in a long indexing time and a large data storage space. We can improve this by adding aggregate index and table index.
+
+   - Aggregate Index: A group of dimension combinations customized to a particular business analysis. For example, an online store analyst needs to analyze the purchasing power of male and female customers in different cities, and the dimension combination in the index is `[city, customer gender]`. At this time, other dimensions are not needed in the index. If you do not need to analyze the product category, then there is no need to include it in any index. You can find out more in the [ [...]
+   - Table Index: Table index supports efficient querying of detailed data records. For example, an online store analyst needs to query the detailed order data, they can add `[OrderKey, OrderDate, PartKey, CustomerKey, OrderQuantity, OrderAmount]` in a table index. After building the index and loading the data, they can query the detailed data records efficiently. You can find out more in the [Table Index](../modeling/model_design/table_index.md) section. 
+2. **How to design an index**
+
+   - Edit aggregate group: On the left navigation bar, click **Data Asset -> Model**. In the **Model List** page, click one model to enter the specific model page, click **Index**. In the **Index Overview** tab, click **+ Index -> Aggregate Group**. The diagram below shows an aggregation group in the built-in demo. There are basically four concepts in an aggregation group in order to control the combination of dimensions:
+
+     - Included Dimensions: Select the dimensions that need to appear in the index from the list of dimensions in the model.
+     - Mandatory Dimension: The dimension corresponding to the business angle that must be analyzed e.g. customer statistics.
+     - Hierarchy Dimensions: Dimensions with hierarchical relationships e.g. countries, provinces, and cities.
+     - Joint Dimensions: Dimensions that must appear together e.g. the supplier and the order date 
+
+     ![Edit Aggregate Index](images/agg_group.png)
+
+   - Add table index: On the left navigation bar, click **Data Asset -> Model**. In the **Model List** page, click one model to enter the specific model page, click **Index**. In the **Index Overview** tab, click **+ Index -> Table Index**. You can select the dimensions you need in the table index and build the index.
+
+
+### <span id="loaddata">Load Data</span>
+
+Kylin applies pre-calculation technology to achieve sub-second query response time in the big data era. After creating the model and editing the index, you need to load the data for the model. The process of loading data is also the pre-calculation process for the pre-defined index. Models that do not have data loaded cannot serve queries. You can find out more about how to load data from the [Load Data](../modeling/load_data/intro.md) section.
+
+1. **Principle of Loading Data**
+
+   - Set time partition column: The data in the fact table in the model generally increases over time, such as new orders grow over time in the order table. Then the order date can be the time partition column to partition orders into daily incremental batches. Setting the time partition column occurs after saving the model design.
+   - Full load: When the model does not have a time partition column, then data in fact table is loaded fully every time. If you need to load the latest week of data in the order table, all data will be reloaded because the model does not have a time partition column. You can find out more in the [Full Load](../modeling/load_data/full_build.md) section.
+   - Incremental load: When the model has been built and put into business analysis, and the model has a time partition column, you can still incrementally load new data while serving queries. For example, new data in the order table can be loaded incrementally daily. Incremental loading eliminates the need to reload pre-calculated data, this increases productivity and saves resources. You can find out more by looking at the [Load by Date/Time](../modeling/load_data/by_date.md) section.
+
+2. **How to Load Data**
+  
+   There are a few options to load data and build the index.
+   
+   - Load data:  On the left navigation bar, click **Data Asset -> Model**, choose to load the data of a model. If the model has a time partition column, you can choose a time range for this data load. The system will launch a new job to load the data of specified time period and build the index at the same time.
+   - Build index:  On the left navigation bar, click **Data Asset -> Model**. In the **Model List** page, click one model to enter the specific model page, click **Index**. You can edit and modify the aggregate index or detail index of the specified model in the **Index Overview** tab , and select which indexes need to be built to the specified data range.
+   
+3. **View Storage Size**
+
+   To view storage size, click **Data Asset -> Model** in the left navigation bar. Then you can check the **Storage** column to view the storage size. If the number is 0.00 KB, the model has no data. If the storage size is larger than 0.00 KB, it means that the model has been loaded with data.
+
+   As shown in the following diagram, the model named *Model3* has loaded data, and the model named *Model* is empty. Queries can't hit the model *Model*.
+
+   ![Model List](images/dataload.png)
+
+### <span id="query">Query Analysis</span>
+
+You can submit a query to analyze your business data and experience the sub-second response time that Kylin offers.
+
+1. **Query Analysis Principles**
+
+   Kylin supports standard SQL queries. After you add a data source table, the query is already pushed down to the Hive data source. You can immediately query the data; however, we do not recommend doing so at this time. Doing so immediately, especially when the data volume is high or the query is complex, can cause the query execution to take a long time.
+
+   Once you have the model and index created and have data loaded in the model, new queries can then hit the model and the pre-calculated data   saved in the model will be used to answer queries. This accelerated query execution method can be 10x to 100x faster. You can read the [Query Analysis](../query/insight/intro.md) section for a detailed explanation of SQL statements.
+
+   Your history query will be saved in the **Query -> History** page, you can view the [Query History](../query/history.md) section for more information.
+
+2. **Query Analysis Example**
+
+   After you import the SSB test dataset, you can navigate to the **Query -> Insight** page, in the **SQL Editor** enter the following SQL statement. The data source we use is the SSB dataset simulating the transactions of an online store. The SQL statement is to query the revenue of different items within the specified order time range, and the results are sorted in descending order of revenue.
+
+   ```sql
+   SELECT LO_PARTKEY, SUM(LO_REVENUE) AS TOTAL_REVENUE
+   FROM SSB.P_LINEORDER
+   WHERE LO_ORDERDATE between '1993-06-01' AND '1994-06-01' 
+   group by LO_PARTKEY
+   order by SUM(LO_REVENUE) DESC
+   ```
+
+   The result of the query is shown in the diagram below. You can find the query object in the query information as *test_model*, which is the model created in the built-in demo. The results of the example query above shows the revenue of different products in the online store.
+
+      ![Query result](images/query_result.png)
+
+### <span id="job">Job Monitor</span>
+
+Different jobs are triggered during the process of using Kylin, such as the job of building index, loading data, and sampling table. You can view the job list in the navigation bar **Monitor -> Job** interface. For more detailed instructions, please see the [Monitor Job](../operations/monitoring/intro.md) section.
+
+Job monitoring can help you effectively manage the use of Kylin. You can check the status of the job to determine whether the operation is complete, whether the operating environment is stable, and so on. The following diagram shows the job monitoring interface in the built-in demo where all jobs are successfully completed.
+
+![Job Monitoring](images/job.png)
diff --git a/website/docs/quickstart/images/agg_group.png b/website/docs/quickstart/images/agg_group.png
new file mode 100644
index 0000000000..84f6fc9496
Binary files /dev/null and b/website/docs/quickstart/images/agg_group.png differ
diff --git a/website/docs/quickstart/images/dataload.png b/website/docs/quickstart/images/dataload.png
new file mode 100644
index 0000000000..3372b7e1e1
Binary files /dev/null and b/website/docs/quickstart/images/dataload.png differ
diff --git a/website/docs/quickstart/images/dataset.png b/website/docs/quickstart/images/dataset.png
new file mode 100644
index 0000000000..d6eb9c5b41
Binary files /dev/null and b/website/docs/quickstart/images/dataset.png differ
diff --git a/website/docs/quickstart/images/datasource.png b/website/docs/quickstart/images/datasource.png
new file mode 100644
index 0000000000..6c0cf0ffa0
Binary files /dev/null and b/website/docs/quickstart/images/datasource.png differ
diff --git a/website/docs/quickstart/images/job.png b/website/docs/quickstart/images/job.png
new file mode 100644
index 0000000000..9422e463a5
Binary files /dev/null and b/website/docs/quickstart/images/job.png differ
diff --git a/website/docs/quickstart/images/model.png b/website/docs/quickstart/images/model.png
new file mode 100644
index 0000000000..609b8d4e6f
Binary files /dev/null and b/website/docs/quickstart/images/model.png differ
diff --git a/website/docs/quickstart/images/query_result.png b/website/docs/quickstart/images/query_result.png
new file mode 100644
index 0000000000..e07f5d2454
Binary files /dev/null and b/website/docs/quickstart/images/query_result.png differ
diff --git a/website/docs/quickstart/intro.md b/website/docs/quickstart/intro.md
new file mode 100644
index 0000000000..3406130758
--- /dev/null
+++ b/website/docs/quickstart/intro.md
@@ -0,0 +1,28 @@
+---
+title: Overview of Kylin
+language: en
+sidebar_label: Overview of Kylin
+pagination_label: Overview of Kylin
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - intro
+    - overview
+draft: true
+last_update:
+    date: 08/12/2022
+---
+
+
+## Overview of Kylin
+
+Apache Kylin is a leading open source OLAP engine for Big Data capable of sub-second query latency on trillions of records. Since being created and open sourced by eBay in 2014, and graduating to a Top Level Project of the Apache Software Foundation in 2015, Kylin has quickly been adopted by thousands of organizations worldwide as their critical analytics application for Big Data. Apache Kylin won the InfoWorld Bossie Award for Best Open Source Big Data Tool in 2015 and 2016.
+
+Kylin has following key strengths:
+
+- High Performance, Sub-second Query Latency
+- Unified Big Data Warehouse Architecture
+- Seamless Integration with BI tools
+- Comprehensive and Enterprise-ready Capabilities
diff --git a/website/docs/quickstart/sample_dataset.md b/website/docs/quickstart/sample_dataset.md
new file mode 100755
index 0000000000..11a913b342
--- /dev/null
+++ b/website/docs/quickstart/sample_dataset.md
@@ -0,0 +1,159 @@
+---
+title: Sample dataset
+language: en
+sidebar_label: Sample dataset
+pagination_label: Sample dataset
+toc_min_heading_level: 2
+toc_max_heading_level: 6
+pagination_prev: null
+pagination_next: null
+keywords:
+    - sample dataset
+draft: false
+last_update:
+    date: 08/19/2022
+---
+
+Kylin is embedded with a standard SSB dataset (approximately 5.9 MB) for testing or trying out different functions. This SSB dataset contains 5 tables and 1 view. LINEORDER serves as a central fact table with 60,175 rows of data. 
+
+### Introduction
+
+The following table lists the 5 tables and 1 view of SSB sample dataset.
+
+| **Table**       | **Type**                 | **Description**                                              |
+| --------------- | ------------------------ | ------------------------------------------------------------ |
+| **LINEORDER**   | Fact table               | Contain detailed information about sales orders. Each row holds order information such as customer, supplier, order amount, and order date. |
+| **P_LINEORDER** | View based on fact table | Contain details about sales orders and a pre-calculated column (V_REVENUE), with the same transaction records as in LINEORDER. |
+| **CUSTOMER**    | Dimension table          | Contain customer information, such as customer name, customer address, and customer city. |
+| **SUPPLIER**    | Dimension table          | Contain supplier information, such as supplier name, supplier address, and supplier city. |
+| **DATES**       | Dimension table          | Contain information about the dates of 7 years, such as beginning date of the year, beginning date of the month, and beginning date of the week. |
+| **PART**        | Dimension table          | Contain part information, such as part name, part category, part color, and part type. |
+
+The 5 tables together constitute the structure of the entire star data model. Below is an entity-relationship (E-R) diagram. 
+
+![Entity-relationship diagram](images/dataset.png)
+
+Join Relationships:
+
+```sql
+LINEORDER LEFT JOIN DATES ON LINEORDER.LO_ORDERDATE = DATES.D_DATEKEY
+LINEORDER LEFT JOIN CUSTOMER ON LINEORDER.LO_CUSTKEY = CUSTOMER.C_CUSTKEY
+LINEORDER LEFT JOIN SUPPLIER ON LINEORDER.LO_SUPPKEY = SUPPLIER.S_SUPPKEY
+LINEORDER LEFT JOIN PART ON LINEORDER.LO_PARTKEY = PART.P_PARTKEY
+```
+
+### Import and check sample dataset
+
+1. Log on to the server command line, and run the following command to import the SSB sample dataset: 
+
+   ```shell
+   $KYLIN_HOME/bin/sample.sh
+   ```
+
+   > [!NOTE]
+   >
+   > Replace `KYLIN_HOME` with the actual path of Kylin.
+
+2. To check sample dataset:
+
+   1. In the terminal, run `hive` command to enter Hive CLI.
+
+   2. Run the following commands sequentially to check information about databases and tables. 
+
+      ```sql
+      ## List all databases
+      show databases;
+      ## Enter database SSB
+      use ssb;
+      ## List all tables in database SSB 
+      show tables;
+      ## Query the number of records in table SUPPLIER 
+      select count(*) from SUPPLIER;
+      ```
+
+### Appendix: Tables and columns  
+
+#### LINEORDER
+
+| Column           | Description                                    |
+| ---------------- | ---------------------------------------------- |
+| LO_ORDERKEY      | Order ID                                       |
+| LO_CUSTKEY       | Customer ID                                    |
+| LO_PARTKEY       | Part ID                                        |
+| LO_SUPPKEY       | Supplier ID                                    |
+| LO_ORDERDATE     | Order date                                     |
+| LO_ORDERPRIORITY | Order priority                                 |
+| LO_SHIPPRIORITY  | Ship priority                                  |
+| LO_LINENUMBER    | Compound primary key: L_ORDERKEY, L_LINENUMBER |
+| LO_QUANTITY      | Number of purchased goods                      |
+| LO_EXTENDEDPRICE | Extended price of order                        |
+| LO_ORDTOTALPRICE | Total price of order                           |
+| LO_DISCOUNT      | Order discount                                 |
+| LO_REVENUE       | Order revenue                                  |
+| LO_SUPPLYCOST    | Supplier cost                                  |
+| LO_TAX           | Tax                                            |
+| LO_COMMITDATE    | Commit date                                    |
+| LO_SHIPMODE      | Ship mode                                      |
+
+#### CUSTOMER
+
+| Column          | Description           |
+| --------------- | --------------------- |
+| C_CUSTKEY       | Customer ID           |
+| C_NAME          | Customer name         |
+| C_ADDRESS       | Customer address      |
+| C_CITY          | Customer city         |
+| C_NATION_PREFIX | Nation prefix         |
+| C_NATION        | Customer nation       |
+| C_REGION        | Customer region       |
+| C_PHONE         | Customer phone number |
+| C_MKTSEGMENT    | Market segment        |
+
+#### SUPPLIER
+
+| Column          | Description           |
+| --------------- | --------------------- |
+| S_SUPPKEY       | Supplier ID           |
+| S_NAME          | Supplier name         |
+| S_ADDRESS       | Supplier address      |
+| S_CITY          | Supplier city         |
+| S_NATION_PREFIX | Nation prefix         |
+| S_NATION        | Supplier nation       |
+| S_REGION        | Supplier region       |
+| S_PHONE         | Supplier phone number |
+
+#### DATES
+
+| Column             | Description                  |
+| ------------------ | ---------------------------- |
+| D_DATEKEY          | Date ID                      |
+| D_DATE             | Date                         |
+| D_DAYOFWEEK        | Day of week                  |
+| D_MONTH            | Month                        |
+| D_YEAR             | Year                         |
+| D_YEARMONTHNUM     | Num of year and month        |
+| D_YEARMONTH        | Year and month               |
+| D_DAYNUMINWEEK     | Num of days in a week        |
+| D_DAYNUMINMONTH    | Num of days in a month       |
+| D_DAYNUMINYEAR     | Num of days in a year        |
+| D_MONTHINYEAR      | Num of months in a year      |
+| D_WEEKNUMINYEAR    | Num of weeks in a year       |
+| D_SELLINGSEASON    | Selling season               |
+| D_LASTDAYINWEEKFL  | Last day in one fiscal week  |
+| D_LASTDAYINMONTHFL | Last day in one fiscal month |
+| D_HOLIDAYFL        | Holiday in one fiscal year   |
+| D_WEEKDAYFL        | Weekday in one fiscal year   |
+
+#### PART
+
+| Column      | Description       |
+| ----------- | ----------------- |
+| P_PARTKEY   | Part ID           |
+| P_NAME      | Part name         |
+| P_MFGR      | Part manufacturer |
+| P_CATEGORY  | Part category     |
+| P_BRAND     | Part brand        |
+| P_COLOR     | Part color        |
+| P_TYPE      | Part type         |
+| P_SIZE      | Part size         |
+| P_CONTAINER | Part container    |
diff --git a/website/docs/snapshot/snapshot_management.md b/website/docs/snapshot/snapshot_management.md
index 203383b9b2..6f75c0f059 100644
--- a/website/docs/snapshot/snapshot_management.md
+++ b/website/docs/snapshot/snapshot_management.md
@@ -15,9 +15,6 @@ last_update:
     date: 08/17/2022
 ---
 
-
-## Snapshot Management and Operations
-
 The snapshot is a read-only static view of a source table, which can be used in the following scenarios:
 
 - Support independent query of dimension table. The snapshot will be used first to answer such queries.
diff --git a/website/sidebars.js b/website/sidebars.js
index d5b9011e3c..594129d46e 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -239,6 +239,10 @@ const sidebars = {
                             type: 'doc',
                             id: 'operations/project-operation/project_settings'
                         },
+                        {
+                            type: 'doc',
+                            id: 'operations/project-operation/toolbar'
+                        },
                         {
                             type: 'doc',
                             id: 'operations/project-operation/alerting'
@@ -437,6 +441,174 @@ const sidebars = {
                     type: 'doc',
                     id: 'modeling/data_modeling'
                 },
+                {
+                    type: 'doc',
+                    id: 'modeling/manual_modeling'
+                },
+                {
+                    type: 'doc',
+                    id: 'modeling/model_concepts_operations'
+                },
+                {
+                    type: 'category',
+                    label: 'Advanced Mode Design',
+                    link: {
+                        type: 'doc',
+                        id: 'modeling/model_design/intro'
+                    },
+                    items: [
+                        {
+                            type: 'category',
+                            label: 'Measures',
+                            link: {
+                                type: 'doc',
+                                id: 'modeling/model_design/measure_design/intro'
+                            },
+                            items: [
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/measure_design/topn'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/measure_design/count_distinct_bitmap'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/measure_design/count_distinct_hllc'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/measure_design/percentile_approx'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/measure_design/corr'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/measure_design/collect_set'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/measure_design/sum_expression'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/measure_design/count_distinct_case_when_expr'
+                                },
+                            ],
+                        },
+                        {
+                            type: 'doc',
+                            id: 'modeling/model_design/computed_column'
+                        },
+                        {
+                            type: 'doc',
+                            id: 'modeling/model_design/slowly_changing_dimension'
+                        },
+                        {
+                            type: 'doc',
+                            id: 'modeling/model_design/aggregation_group'
+                        },
+                        {
+                            type: 'doc',
+                            id: 'modeling/model_design/table_index'
+                        },
+                        {
+                            type: 'category',
+                            label: 'Model Advanced Settings',
+                            link: {
+                                type: 'doc',
+                                id: 'modeling/model_design/advance_guide/intro'
+                            },
+                            items: [
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/advance_guide/model_metadata_managment'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/advance_guide/multilevel_partitioning'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/advance_guide/fast_bitmap'
+                                },
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/model_design/advance_guide/integer_encoding'
+                                },
+                            ],
+                        },
+                        {
+                            type: 'doc',
+                            id: 'modeling/model_design/precompute_join_relations'
+                        },
+                    ],
+                },
+                {
+                    type: 'category',
+                    label: 'Load Data',
+                    link: {
+                        type: 'doc',
+                        id: 'modeling/load_data/intro'
+                    },
+                    items: [
+                        {
+                            type: 'doc',
+                            id: 'modeling/load_data/full_build'
+                        },
+                        {
+                            type: 'doc',
+                            id: 'modeling/load_data/by_date'
+                        },
+                        {
+                            type: 'doc',
+                            id: 'modeling/load_data/build_index'
+                        },
+                        {
+                            type: 'category',
+                            label: 'Segment Operation and Settings',
+                            link: {
+                                type: 'doc',
+                                id: 'modeling/load_data/segment_operation_settings/intro'
+                            },
+                            items: [
+                                {
+                                    type: 'doc',
+                                    id: 'modeling/load_data/segment_operation_settings/segment_merge'
+                                },
+                            ],
+                        },
+                    ],
+                },
+            ],
+        },
+        {
+            type: 'category',
+            label: 'Monitor Job',
+            link: {
+                type: 'doc',
+                id: 'monitor/intro'
+            },
+            items: [
+                {
+                    type: 'doc',
+                    id: 'monitor/job_concept_settings'
+                },
+                {
+                    type: 'doc',
+                    id: 'monitor/job_operations'
+                },
+                {
+                    type: 'doc',
+                    id: 'monitor/job_diagnosis'
+                },
+                {
+                    type: 'doc',
+                    id: 'monitor/job_exception_resolve'
+                },
             ],
         },
         {