001 /** 002 * Copyright (c) 2010 Yahoo! Inc. All rights reserved. 003 * Licensed under the Apache License, Version 2.0 (the "License"); 004 * you may not use this file except in compliance with the License. 005 * You may obtain a copy of the License at 006 * 007 * http://www.apache.org/licenses/LICENSE-2.0 008 * 009 * Unless required by applicable law or agreed to in writing, software 010 * distributed under the License is distributed on an "AS IS" BASIS, 011 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 012 * See the License for the specific language governing permissions and 013 * limitations under the License. See accompanying LICENSE file. 014 */ 015 package org.apache.oozie.service; 016 017 import org.apache.hadoop.mapred.JobClient; 018 import org.apache.hadoop.mapred.JobConf; 019 import org.apache.hadoop.fs.FileSystem; 020 import org.apache.hadoop.fs.Path; 021 import org.apache.hadoop.conf.Configuration; 022 import org.apache.hadoop.security.UserGroupInformation; 023 import org.apache.hadoop.security.token.Token; 024 import org.apache.hadoop.filecache.DistributedCache; 025 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier; 026 import org.apache.hadoop.io.Text; 027 import org.apache.oozie.util.XLog; 028 import org.apache.oozie.util.XConfiguration; 029 import org.apache.oozie.util.ParamChecker; 030 import org.apache.oozie.ErrorCode; 031 import org.apache.oozie.service.HadoopAccessorService; 032 import org.apache.oozie.service.HadoopAccessorException; 033 import org.apache.oozie.service.Service; 034 import org.apache.oozie.service.ServiceException; 035 036 import java.io.IOException; 037 import java.net.URI; 038 import java.net.URISyntaxException; 039 import java.security.PrivilegedExceptionAction; 040 import java.util.concurrent.ConcurrentMap; 041 import java.util.concurrent.ConcurrentHashMap; 042 043 /** 044 * The HadoopAccessorService returns HadoopAccessor instances configured to work on behalf of a user-group. <p/> The 045 * default accessor used is the base accessor which just injects the UGI into the configuration instance used to 046 * create/obtain JobClient and ileSystem instances. <p/> The HadoopAccess class to use can be configured in the 047 * <code>oozie-site.xml</code> using the <code>oozie.service.HadoopAccessorService.accessor.class</code> property. 048 */ 049 public class KerberosHadoopAccessorService extends HadoopAccessorService { 050 051 public static final String CONF_PREFIX = Service.CONF_PREFIX + "HadoopAccessorService."; 052 053 public static final String KERBEROS_AUTH_ENABLED = CONF_PREFIX + "kerberos.enabled"; 054 public static final String KERBEROS_KEYTAB = CONF_PREFIX + "keytab.file"; 055 public static final String KERBEROS_PRINCIPAL = CONF_PREFIX + "kerberos.principal"; 056 057 private ConcurrentMap<String, UserGroupInformation> userUgiMap; 058 059 private String localRealm; 060 061 public void init(Configuration serviceConf) throws ServiceException { 062 boolean kerberosAuthOn = serviceConf.getBoolean(KERBEROS_AUTH_ENABLED, true); 063 XLog.getLog(getClass()).info("Oozie Kerberos Authentication [{0}]", (kerberosAuthOn) ? "enabled" : "disabled"); 064 if (kerberosAuthOn) { 065 try { 066 String keytabFile = serviceConf.get(KERBEROS_KEYTAB, 067 System.getProperty("user.home") + "/oozie.keytab").trim(); 068 if (keytabFile.length() == 0) { 069 throw new ServiceException(ErrorCode.E0026, KERBEROS_KEYTAB); 070 } 071 String principal = serviceConf.get(KERBEROS_PRINCIPAL, "oozie/localhost@LOCALHOST"); 072 if (principal.length() == 0) { 073 throw new ServiceException(ErrorCode.E0026, KERBEROS_PRINCIPAL); 074 } 075 Configuration conf = new Configuration(); 076 conf.set("hadoop.security.authentication", "kerberos"); 077 UserGroupInformation.setConfiguration(conf); 078 UserGroupInformation.loginUserFromKeytab(principal, keytabFile); 079 XLog.getLog(getClass()).info("Got Kerberos ticket, keytab [{0}], Oozie principal principal [{1}]", 080 keytabFile, principal); 081 } 082 catch (ServiceException ex) { 083 throw ex; 084 } 085 catch (Exception ex) { 086 throw new ServiceException(ErrorCode.E0100, getClass().getName(), ex.getMessage(), ex); 087 } 088 } 089 else { 090 Configuration conf = new Configuration(); 091 conf.set("hadoop.security.authentication", "simple"); 092 UserGroupInformation.setConfiguration(conf); 093 } 094 localRealm = serviceConf.get("local.realm"); 095 096 userUgiMap = new ConcurrentHashMap<String, UserGroupInformation>(); 097 } 098 099 public void destroy() { 100 userUgiMap = null; 101 super.destroy(); 102 } 103 104 private UserGroupInformation getUGI(String user) throws IOException { 105 UserGroupInformation ugi = userUgiMap.get(user); 106 if (ugi == null) { 107 // taking care of a race condition, the latest UGI will be discarded 108 ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser()); 109 userUgiMap.putIfAbsent(user, ugi); 110 } 111 return ugi; 112 } 113 114 /** 115 * Return a JobClient created with the provided user/group. 116 * 117 * @param conf JobConf with all necessary information to create the JobClient. 118 * @return JobClient created with the provided user/group. 119 * @throws HadoopAccessorException if the client could not be created. 120 */ 121 public JobClient createJobClient(String user, String group, final JobConf conf) throws HadoopAccessorException { 122 ParamChecker.notEmpty(user, "user"); 123 ParamChecker.notEmpty(group, "group"); 124 validateJobTracker(conf.get("mapred.job.tracker")); 125 try { 126 UserGroupInformation ugi = getUGI(user); 127 JobClient jobClient = ugi.doAs(new PrivilegedExceptionAction<JobClient>() { 128 public JobClient run() throws Exception { 129 return new JobClient(conf); 130 } 131 }); 132 Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token")); 133 conf.getCredentials().addToken(new Text("mr token"), mrdt); 134 return jobClient; 135 } 136 catch (InterruptedException ex) { 137 throw new HadoopAccessorException(ErrorCode.E0902, ex); 138 } 139 catch (IOException ex) { 140 throw new HadoopAccessorException(ErrorCode.E0902, ex); 141 } 142 } 143 144 /** 145 * Return a FileSystem created with the provided user/group. 146 * 147 * @param conf Configuration with all necessary information to create the FileSystem. 148 * @return FileSystem created with the provided user/group. 149 * @throws HadoopAccessorException if the filesystem could not be created. 150 */ 151 public FileSystem createFileSystem(String user, String group, final Configuration conf) 152 throws HadoopAccessorException { 153 ParamChecker.notEmpty(user, "user"); 154 ParamChecker.notEmpty(group, "group"); 155 try { 156 validateNameNode(new URI(conf.get("fs.default.name")).getAuthority()); 157 UserGroupInformation ugi = getUGI(user); 158 return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() { 159 public FileSystem run() throws Exception { 160 Configuration defaultConf = new Configuration(); 161 XConfiguration.copy(conf, defaultConf); 162 return FileSystem.get(defaultConf); 163 } 164 }); 165 } 166 catch (InterruptedException ex) { 167 throw new HadoopAccessorException(ErrorCode.E0902, ex); 168 } 169 catch (IOException ex) { 170 throw new HadoopAccessorException(ErrorCode.E0902, ex); 171 } 172 catch (URISyntaxException ex) { 173 throw new HadoopAccessorException(ErrorCode.E0902, ex); 174 } 175 } 176 177 /** 178 * Return a FileSystem created with the provided user/group for the specified URI. 179 * 180 * @param uri file system URI. 181 * @param conf Configuration with all necessary information to create the FileSystem. 182 * @return FileSystem created with the provided user/group. 183 * @throws HadoopAccessorException if the filesystem could not be created. 184 */ 185 public FileSystem createFileSystem(String user, String group, final URI uri, final Configuration conf) 186 throws HadoopAccessorException { 187 ParamChecker.notEmpty(user, "user"); 188 ParamChecker.notEmpty(group, "group"); 189 validateNameNode(uri.getAuthority()); 190 try { 191 UserGroupInformation ugi = getUGI(user); 192 return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() { 193 public FileSystem run() throws Exception { 194 Configuration defaultConf = new Configuration(); 195 196 defaultConf.set(WorkflowAppService.HADOOP_JT_KERBEROS_NAME, "mapred/_HOST@" + localRealm); 197 defaultConf.set(WorkflowAppService.HADOOP_NN_KERBEROS_NAME, "hdfs/_HOST@" + localRealm); 198 199 XConfiguration.copy(conf, defaultConf); 200 return FileSystem.get(uri, defaultConf); 201 } 202 }); 203 } 204 catch (InterruptedException ex) { 205 throw new HadoopAccessorException(ErrorCode.E0902, ex); 206 } 207 catch (IOException ex) { 208 throw new HadoopAccessorException(ErrorCode.E0902, ex); 209 } 210 } 211 212 213 public void addFileToClassPath(String user, String group, final Path file, final Configuration conf) 214 throws IOException { 215 ParamChecker.notEmpty(user, "user"); 216 ParamChecker.notEmpty(group, "group"); 217 try { 218 UserGroupInformation ugi = getUGI(user); 219 ugi.doAs(new PrivilegedExceptionAction<Void>() { 220 public Void run() throws Exception { 221 Configuration defaultConf = new Configuration(); 222 XConfiguration.copy(conf, defaultConf); 223 //Doing this NOP add first to have the FS created and cached 224 DistributedCache.addFileToClassPath(file, defaultConf); 225 226 DistributedCache.addFileToClassPath(file, conf); 227 return null; 228 } 229 }); 230 231 } 232 catch (InterruptedException ex) { 233 throw new IOException(ex); 234 } 235 236 } 237 238 }