티스토리 뷰

공부

[zeppelin] add interpreter

승가비 2023. 2. 17. 19:54
728x90
/**
 * Sends a JSON request to the Zeppelin server with credentials attached.
 *
 * @param {string} method - HTTP verb, e.g. "POST" or "PUT".
 * @param {string} url - Request URL (relative to the Zeppelin host).
 * @param {string} payload - Pre-serialized JSON request body.
 */
function requestUtils(method, url, payload) {
    var xhr = new XMLHttpRequest();
    xhr.open(method, url, true);
    // Zeppelin's REST API authenticates via the session cookie, so
    // credentials must ride along with the request.
    xhr.withCredentials = true;
    xhr.setRequestHeader("Content-Type", "application/json");

    xhr.onreadystatechange = function () {
        if (this.readyState !== XMLHttpRequest.DONE) {
            return;
        }
        if (this.status === 200) {
            console.log(this);
            console.log(url, payload);
        } else {
            // Previously non-200 responses were silently dropped, which made
            // failed interpreter updates invisible; surface them instead.
            console.error("request failed:", this.status, method, url, this.responseText);
        }
    };
    // Fires on network-level failures (DNS, CORS, connection reset) where
    // onreadystatechange never reaches a usable status.
    xhr.onerror = function () {
        console.error("network error:", method, url);
    };
    xhr.send(payload);
}

// Interpreter setting name and the S3 staging dir referenced in the payload.
var NAME = "spark-seunggabi";
var STAGINGDIR = "s3://log/hive-staging/";

// POST to the collection endpoint creates a new interpreter setting;
// PUT to /setting/<name> (used below) updates an existing one.
//var METHOD = "POST"
//var URL = "/api/interpreter/setting/"

var METHOD = "PUT";
var URL = "/api/interpreter/setting/" + NAME;

// Builds one Zeppelin interpreter property entry. The "description" key is
// omitted entirely when not supplied, matching the original payload shape
// for the plain "textarea" tuning properties.
function prop(name, value, type, description) {
    var p = { name: name, value: value, type: type };
    if (description !== undefined) {
        p.description = description;
    }
    return p;
}

// Expands [name, value, type, description?] tuples into the { name: prop }
// map the interpreter-setting API expects; removes the name-repeated-twice
// duplication of the original literal.
function props(entries) {
    var out = {};
    entries.forEach(function (e) {
        out[e[0]] = prop(e[0], e[1], e[2], e[3]);
    });
    return out;
}

// NOTE(review): in the original, four descriptions were split by raw line
// breaks inside the string literals (a SyntaxError, almost certainly a
// copy/paste artifact); they are re-joined with a single space here —
// confirm wording against Zeppelin's interpreter-setting.json if exactness
// matters (descriptions are display-only).
var PAYLOAD = {
    name: NAME,
    group: "spark",
    option: {
        remote: true,
        port: -1,
        perNote: "shared",
        perUser: "shared",
        isExistingProcess: false,
        setPermission: false,
        owners: [],
        isUserImpersonate: false
    },
    properties: props([
        ["spark.master", "yarn", "string", "Spark master uri. local | yarn-client | yarn-cluster | spark master address of standalone mode, ex) spark://master_host:7077"],
        ["spark.submit.deployMode", "cluster", "string", "The deploy mode of Spark driver program, either \"client\" or \"cluster\", Which means to launch driver program locally (\"client\") or remotely (\"cluster\") on one of the nodes inside the cluster."],
        ["spark.app.name", NAME, "string", "The name of spark application."],
        ["spark.driver.cores", "5", "number", "Number of cores to use for the driver process, only in cluster mode."],
        ["spark.driver.memory", "10g", "string", "Amount of memory to use for the driver process, i.e. where SparkContext is initialized, in the same format as JVM memory strings with a size unit suffix (\"k\", \"m\", \"g\" or \"t\") (e.g. 512m, 2g)."],
        ["spark.executor.cores", "5", "number", "The number of cores to use on each executor"],
        ["spark.executor.memory", "10g", "string", "Executor memory per worker instance. ex) 512m, 32g"],
        ["zeppelin.spark.useHiveContext", true, "checkbox", "Use HiveContext instead of SQLContext if it is true. Enable hive for SparkSession."],
        ["zeppelin.spark.run.asLoginUser", true, "checkbox", "Whether run spark job as the zeppelin login user, it is only applied when running spark job in hadoop yarn cluster and shiro is enabled"],
        ["zeppelin.spark.printREPLOutput", true, "checkbox", "Print REPL output"],
        ["zeppelin.spark.maxResult", "100000", "number", "Max number of result to display."],
        ["zeppelin.spark.enableSupportedVersionCheck", true, "checkbox", "Whether checking supported spark version. Developer only setting, not for production use"],
        ["zeppelin.spark.ui.hidden", false, "checkbox", "Whether hide spark ui in zeppelin ui"],
        ["spark.webui.yarn.useProxy", false, "checkbox", "whether use yarn proxy url as spark weburl, e.g. http://localhost:8088/proxy/application_1583396598068_0004"],
        ["zeppelin.spark.scala.color", true, "checkbox", "Whether enable color output of spark scala interpreter"],
        ["zeppelin.spark.deprecatedMsg.show", true, "checkbox", "Whether show the spark deprecated message, spark 2.2 and before are deprecated. Zeppelin will display warning message by default"],
        ["zeppelin.spark.concurrentSQL", true, "checkbox", "Execute multiple SQL concurrently if set true."],
        ["zeppelin.spark.concurrentSQL.max", "10", "number", "Max number of SQL concurrently executed"],
        ["zeppelin.spark.sql.stacktrace", true, "checkbox", "Show full exception stacktrace for SQL queries if set to true."],
        ["zeppelin.spark.sql.interpolation", false, "checkbox", "Enable ZeppelinContext variable interpolation into spark sql"],
        ["PYSPARK_PYTHON", "python", "string", "Python binary executable to use for PySpark in both driver and workers (default is python2.7 if available, otherwise python). Property `spark.pyspark.python` take precedence if it is set"],
        ["PYSPARK_DRIVER_PYTHON", "python", "string", "Python binary executable to use for PySpark in driver only (default is `PYSPARK_PYTHON`). Property `spark.pyspark.driver.python` take precedence if it is set"],
        ["zeppelin.pyspark.useIPython", true, "checkbox", "Whether use IPython when it is available"],
        ["zeppelin.R.knitr", true, "checkbox", "Whether use knitr or not"],
        ["zeppelin.R.cmd", "R", "string", "R binary executable path"],
        ["zeppelin.R.image.width", "100%", "number", "Image width of R plotting"],
        ["zeppelin.R.render.options", "out.format = 'html', comment = NA, echo = FALSE, results = 'asis', message = F, warning = F, fig.retina = 2", "textarea", ""],
        ["zeppelin.R.shiny.portRange", ":", "string", "Shiny app would launch a web app at some port, this property is to specify the portRange via format '<start>:<end>', e.g. '5000:5001'. By default it is ':' which means any port"],
        ["zeppelin.kotlin.shortenTypes", true, "checkbox", "Show short types instead of full, e.g. List<String> or kotlin.collections.List<kotlin.String>"],
        // Cluster tuning overrides (no descriptions in the original payload).
        ["spark.dynamicAllocation.initialExecutors", "1", "textarea"],
        ["spark.dynamicAllocation.enabled", "true", "textarea"],
        ["spark.dynamicAllocation.minExecutors", "1", "textarea"],
        ["spark.dynamicAllocation.maxExecutors", "300", "textarea"],
        ["spark.sql.hive.convertMetastoreOrc", "false", "textarea"],
        ["spark.driver.maxResultSize", "10g", "textarea"],
        ["spark.sql.parquet.mergeSchema", "true", "textarea"],
        ["spark.hadoop.hive.exec.stagingdir", STAGINGDIR, "textarea"],
        ["spark.sql.autoBroadcastJoinThreshold", "-1", "textarea"]
    ]),
    dependencies: []
};

// Serialize the interpreter setting and fire the update request.
var requestBody = JSON.stringify(PAYLOAD);
requestUtils(METHOD, URL, requestBody);
728x90
댓글