数据库部分 1.oracle调度 查询 Select * from dba_scheduler_jobs 创建job begin dbms_scheduler.create_job( job_name => 'myjob', job_type => 'STORED_PROCEDURE', job_action => 'myaction', start_date => sysdate, repeat_interval => 'FREQ=DAILY;BYHOUR=9;BYMINUTE=30;BYSECOND=0', auto_drop => false ); end; 运行 begin dbms_scheduler.run_job('myjob'); end; 启用 begin dbms_scheduler.enable('myjob'); end; 禁用 begin dbms_scheduler.disable('myjob'); end; 删除 begin dbms_scheduler.drop_job( job_name =>'myjob', force=>TRUE); end; 2.sybase iq 存储过程 create procedure mypro() as begin declare @v_sql varchar(200) set @v_sql='select * from tb' execute(@v_sql) end; 调度 create event event_myevent schedule myevent start time '2:00 AM' every 24 hours handler begin call mypro(); end; 3.gbase 数据库 (1)日志 set long_query_time=1000 (2)查表名 show tables like 'tb%' (3)日期函数 加减 date_add(time1,interval 1 hour/day/second) 毫秒 to_char(time1,'yyyy-mm-dd hh24:mi:ss.ff3') 时间差 datediff(date1,date2)--天数 timestampdiff(microsecond,t1,t2) to_char(cast('2021-11-12' as datetime),'yyyy-mm-dd hh24:mi:ss') 周 to_char('2021-11-12','WW') 排序 row_number() over(order by btime) as id 空处理 ifnull(name,'未知') 查看存储过程脚本 show create procedure myproc 查建表脚本 show create table mytb 执行存过 call myproc() 执行动态sql set mysql='......' 
call executeSql(mysql) 将值给变量 select btime into v_time from mytb 创建存过 Delimiter // Drop procedure if exists myproc// Create procedure myproc() Begin Declare v_h int; Set v_h=10; While v_h > 0 do Call fun(); Set v_h =v_h-1; End while; End // 增加列 Alter table mytb Add column( ID INT, NAME varchar(100) ) 一年中第几周 to_char(str_to_date('2021-11-12','%Y-%m-%d'),'IW') IP转换 lpad(conv(inet_aton('10.10.10.1'),10,16),8,'0') inet_ntoa(conv('0ABB5998',16,10)) 4.hbase 1)打开phoenix cd /...../phoenix/bin ./sqlline.py ip:2181 查表 !tables 退出 !q 基本用法 create table test(id integer not null primary key); upsert into test values(1); select * from test; 2)hbase shell 启动 hbase shell 查表结构 describe 'test' 查那些表 list 5.hdfs su hdfs 看文件 hadoop fs -du -h /....../mytb/p_time=888888 删分区 alter table mytb drop partition( p_time=888888) 修复分区 msck repair table mytb 删文件 hadoop fs -rm -r -skipTrash /....../mytb/p_time=888888/* 6.spark sql spark-beeline !connect jdbc:hive2://localhost:18000/mydatabase: user show partitions mytb; 删分区 alter table mytb drop partition( p_time=888888); 工具设查询条数限制 set spark.sql.thriftServer.limitCollectNumber=0; 7.pg数据库 (1)时间运算 btime + cast(addcol || 'ms' as interval) cast(extract(epoch from(t1-t2)) as integer) (2)整数 mycol::numeric (3)空值处理 coalesce(mycol,'未知') (4)查列 select mycol from information_schema.columns where table_name='mytb' (5)查表 select table_name from information_schema.tables (6)查数据库 select * from pg_database (7)取部分日期 date_part('day',timestamp '2021-11-12 17:00:00') (8)建库 create user u1 with password 'p1' create database mydb owner u1 grant all privileges on database mydb to u1 二)其他 hdfs数据丢失启动 hdfs dfsadmin -safemode leave hdfs fsck / -delete